// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/affs/file.c
 *
 *  (c) 1996  Hans-Joachim Widmaier - Rewritten
 *
 *  (C) 1993  Ray Burr - Modified for Amiga FFS filesystem.
 *
 *  (C) 1992  Eric Youngdale Modified for ISO 9660 filesystem.
 *
 *  (C) 1991  Linus Torvalds - minix filesystem
 *
 *  affs regular file handling primitives
 */

#include <linux/uio.h>
#include "affs.h"

static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);

static int
affs_file_open(struct inode *inode, struct file *filp)
{
	pr_debug("open(%lu,%d)\n",
		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));
	atomic_inc(&AFFS_I(inode)->i_opencnt);
	return 0;
}

static int
affs_file_release(struct inode *inode, struct file *filp)
{
	pr_debug("release(%lu, %d)\n",
		 inode->i_ino, atomic_read(&AFFS_I(inode)->i_opencnt));

	if (atomic_dec_and_test(&AFFS_I(inode)->i_opencnt)) {
		inode_lock(inode);
		if (inode->i_size != AFFS_I(inode)->mmu_private)
			affs_truncate(inode);
		affs_free_prealloc(inode);
		inode_unlock(inode);
	}

	return 0;
}

static int
affs_grow_extcache(struct inode *inode, u32 lc_idx)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 lc_max;
	int i, j, key;

	if (!AFFS_I(inode)->i_lc) {
		char *ptr = (char *)get_zeroed_page(GFP_NOFS);
		if (!ptr)
			return -ENOMEM;
		AFFS_I(inode)->i_lc = (u32 *)ptr;
		AFFS_I(inode)->i_ac = (struct affs_ext_key *)(ptr + AFFS_CACHE_SIZE / 2);
	}

	lc_max = AFFS_LC_SIZE << AFFS_I(inode)->i_lc_shift;

	if (AFFS_I(inode)->i_extcnt > lc_max) {
		u32 lc_shift, lc_mask, tmp, off;

		/* need to recalculate linear cache, start from old size */
		lc_shift = AFFS_I(inode)->i_lc_shift;
		tmp = (AFFS_I(inode)->i_extcnt / AFFS_LC_SIZE) >> lc_shift;
		for (; tmp; tmp >>= 1)
			lc_shift++;
		lc_mask = (1 << lc_shift) - 1;

		/* fix idx and old size to new shift */
		lc_idx >>= (lc_shift - AFFS_I(inode)->i_lc_shift);
		AFFS_I(inode)->i_lc_size >>= (lc_shift - AFFS_I(inode)->i_lc_shift);

		/* first shrink old cache to make more space */
		off = 1 << (lc_shift - AFFS_I(inode)->i_lc_shift);
		for (i = 1, j = off; j < AFFS_LC_SIZE; i++, j += off)
			AFFS_I(inode)->i_ac[i] = AFFS_I(inode)->i_ac[j];

		AFFS_I(inode)->i_lc_shift = lc_shift;
		AFFS_I(inode)->i_lc_mask = lc_mask;
	}

	/* fill cache to the needed index */
	i = AFFS_I(inode)->i_lc_size;
	AFFS_I(inode)->i_lc_size = lc_idx + 1;
	for (; i <= lc_idx; i++) {
		if (!i) {
			AFFS_I(inode)->i_lc[0] = inode->i_ino;
			continue;
		}
		key = AFFS_I(inode)->i_lc[i - 1];
		j = AFFS_I(inode)->i_lc_mask + 1;
		// unlock cache
		for (; j > 0; j--) {
			bh = affs_bread(sb, key);
			if (!bh)
				goto err;
			key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
			affs_brelse(bh);
		}
		// lock cache
		AFFS_I(inode)->i_lc[i] = key;
	}

	return 0;

err:
	// lock cache
	return -EIO;
}
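/*
 * A sketch of the extent block cache as the code above and below uses it
 * (summarized from this file, not from an external spec):
 *
 *   i_lc[]  "linear" cache; shares one zeroed page with i_ac[].  i_lc[i]
 *           holds the key of extent block number (i << i_lc_shift), with
 *           i_lc[0] being the file header block (inode->i_ino).  When the
 *           file outgrows the cache, affs_grow_extcache() increases the
 *           stride (i_lc_shift) and keeps every off'th entry, so the same
 *           AFFS_LC_SIZE slots cover the file more sparsely.
 *
 *   i_ac[]  small associative cache of recently visited (ext, key) pairs.
 *
 * Lookups start from the nearest cached key and walk the on-disk chain of
 * extension pointers (AFFS_TAIL(sb, bh)->extension) from there.
 */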
static struct buffer_head *
affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh;
	u32 blocknr, tmp;

	blocknr = affs_alloc_block(inode, bh->b_blocknr);
	if (!blocknr)
		return ERR_PTR(-ENOSPC);

	new_bh = affs_getzeroblk(sb, blocknr);
	if (!new_bh) {
		affs_free_block(sb, blocknr);
		return ERR_PTR(-EIO);
	}

	AFFS_HEAD(new_bh)->ptype = cpu_to_be32(T_LIST);
	AFFS_HEAD(new_bh)->key = cpu_to_be32(blocknr);
	AFFS_TAIL(sb, new_bh)->stype = cpu_to_be32(ST_FILE);
	AFFS_TAIL(sb, new_bh)->parent = cpu_to_be32(inode->i_ino);
	affs_fix_checksum(sb, new_bh);

	mark_buffer_dirty_inode(new_bh, inode);

	tmp = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
	if (tmp)
		affs_warning(sb, "alloc_ext", "previous extension set (%x)", tmp);
	AFFS_TAIL(sb, bh)->extension = cpu_to_be32(blocknr);
	affs_adjust_checksum(bh, blocknr - tmp);
	mark_buffer_dirty_inode(bh, inode);

	AFFS_I(inode)->i_extcnt++;
	mark_inode_dirty(inode);

	return new_bh;
}

static inline struct buffer_head *
affs_get_extblock(struct inode *inode, u32 ext)
{
	/* inline the simplest case: same extended block as last time */
	struct buffer_head *bh = AFFS_I(inode)->i_ext_bh;
	if (ext == AFFS_I(inode)->i_ext_last)
		get_bh(bh);
	else
		/* we have to do more (not inlined) */
		bh = affs_get_extblock_slow(inode, ext);

	return bh;
}
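/*
 * Slow path for the lookup above.  Roughly, in the order the code checks
 * (a summary of the function below, not an external contract):
 *
 *   1. ext == i_ext_last + 1: follow the cached block's extension pointer,
 *      allocating a new extent block when ext == i_extcnt (file growth).
 *   2. ext == 0: the file header block itself.
 *   3. ext >= i_extcnt: allocate, chaining off extent ext - 1.
 *   4. Otherwise consult the linear cache, then the associative cache,
 *      and walk the extension chain for the remaining distance.
 */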
static struct buffer_head *
affs_get_extblock_slow(struct inode *inode, u32 ext)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	u32 ext_key;
	u32 lc_idx, lc_off, ac_idx;
	u32 tmp, idx;

	if (ext == AFFS_I(inode)->i_ext_last + 1) {
		/* read the next extended block from the current one */
		bh = AFFS_I(inode)->i_ext_bh;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		if (ext < AFFS_I(inode)->i_extcnt)
			goto read_ext;
		BUG_ON(ext > AFFS_I(inode)->i_extcnt);
		bh = affs_alloc_extblock(inode, bh, ext);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

	if (ext == 0) {
		/* we seek back to the file header block */
		ext_key = inode->i_ino;
		goto read_ext;
	}

	if (ext >= AFFS_I(inode)->i_extcnt) {
		struct buffer_head *prev_bh;

		/* allocate a new extended block */
		BUG_ON(ext > AFFS_I(inode)->i_extcnt);

		/* get previous extended block */
		prev_bh = affs_get_extblock(inode, ext - 1);
		if (IS_ERR(prev_bh))
			return prev_bh;
		bh = affs_alloc_extblock(inode, prev_bh, ext);
		affs_brelse(prev_bh);
		if (IS_ERR(bh))
			return bh;
		goto store_ext;
	}

again:
	/* check if there is an extended cache and whether it's large enough */
	lc_idx = ext >> AFFS_I(inode)->i_lc_shift;
	lc_off = ext & AFFS_I(inode)->i_lc_mask;

	if (lc_idx >= AFFS_I(inode)->i_lc_size) {
		int err;

		err = affs_grow_extcache(inode, lc_idx);
		if (err)
			return ERR_PTR(err);
		goto again;
	}

	/* every n'th key we find in the linear cache */
	if (!lc_off) {
		ext_key = AFFS_I(inode)->i_lc[lc_idx];
		goto read_ext;
	}

	/* maybe it's still in the associative cache */
	ac_idx = (ext - lc_idx - 1) & AFFS_AC_MASK;
	if (AFFS_I(inode)->i_ac[ac_idx].ext == ext) {
		ext_key = AFFS_I(inode)->i_ac[ac_idx].key;
		goto read_ext;
	}

	/* try to find one of the previous extended blocks */
	tmp = ext;
	idx = ac_idx;
	while (--tmp, --lc_off > 0) {
		idx = (idx - 1) & AFFS_AC_MASK;
		if (AFFS_I(inode)->i_ac[idx].ext == tmp) {
			ext_key = AFFS_I(inode)->i_ac[idx].key;
			goto find_ext;
		}
	}

	/* fall back to the linear cache */
	ext_key = AFFS_I(inode)->i_lc[lc_idx];
find_ext:
	/* read all extended blocks until we find the one we need */
	//unlock cache
	do {
		bh = affs_bread(sb, ext_key);
		if (!bh)
			goto err_bread;
		ext_key = be32_to_cpu(AFFS_TAIL(sb, bh)->extension);
		affs_brelse(bh);
		tmp++;
	} while (tmp < ext);
	//lock cache

	/* store it in the associative cache */
	// recalculate ac_idx?
	AFFS_I(inode)->i_ac[ac_idx].ext = ext;
	AFFS_I(inode)->i_ac[ac_idx].key = ext_key;

read_ext:
	/* finally read the right extended block */
	//unlock cache
	bh = affs_bread(sb, ext_key);
	if (!bh)
		goto err_bread;
	//lock cache

store_ext:
	/* release old cached extended block and store the new one */
	affs_brelse(AFFS_I(inode)->i_ext_bh);
	AFFS_I(inode)->i_ext_last = ext;
	AFFS_I(inode)->i_ext_bh = bh;
	get_bh(bh);

	return bh;

err_bread:
	affs_brelse(bh);
	return ERR_PTR(-EIO);
}

static int
affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *ext_bh;
	u32 ext;

	pr_debug("%s(%lu, %llu)\n", __func__, inode->i_ino,
		 (unsigned long long)block);

	BUG_ON(block > (sector_t)0x7fffffffUL);

	if (block >= AFFS_I(inode)->i_blkcnt) {
		if (block > AFFS_I(inode)->i_blkcnt || !create)
			goto err_big;
	} else
		create = 0;

	//lock cache
	affs_lock_ext(inode);

	ext = (u32)block / AFFS_SB(sb)->s_hashsize;
	block -= ext * AFFS_SB(sb)->s_hashsize;
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh))
		goto err_ext;
	map_bh(bh_result, sb, (sector_t)be32_to_cpu(AFFS_BLOCK(sb, ext_bh, block)));

	if (create) {
		u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
		if (!blocknr)
			goto err_alloc;
		set_buffer_new(bh_result);
		AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
		AFFS_I(inode)->i_blkcnt++;

		/* store new block */
		if (bh_result->b_blocknr)
			affs_warning(sb, "get_block",
				     "block already set (%llx)",
				     (unsigned long long)bh_result->b_blocknr);
		AFFS_BLOCK(sb, ext_bh, block) = cpu_to_be32(blocknr);
		AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(block + 1);
		affs_adjust_checksum(ext_bh, blocknr - bh_result->b_blocknr + 1);
		bh_result->b_blocknr = blocknr;

		if (!block) {
			/* insert first block into header block */
			u32 tmp = be32_to_cpu(AFFS_HEAD(ext_bh)->first_data);
			if (tmp)
				affs_warning(sb, "get_block", "first block already set (%d)", tmp);
			AFFS_HEAD(ext_bh)->first_data = cpu_to_be32(blocknr);
			affs_adjust_checksum(ext_bh, blocknr - tmp);
		}
	}

	affs_brelse(ext_bh);
	//unlock cache
	affs_unlock_ext(inode);
	return 0;

err_big:
	affs_error(inode->i_sb, "get_block", "strange block request %llu",
		   (unsigned long long)block);
	return -EIO;
err_ext:
	// unlock cache
	affs_unlock_ext(inode);
	return PTR_ERR(ext_bh);
err_alloc:
	brelse(ext_bh);
	clear_buffer_mapped(bh_result);
	bh_result->b_bdev = NULL;
	// unlock cache
	affs_unlock_ext(inode);
	return -ENOSPC;
}
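/*
 * Logical-to-physical mapping as affs_get_block() implements it (a sketch
 * derived from the code above; each extent block holds s_hashsize data
 * block pointers, and extent 0 is the file header block itself):
 *
 *	ext  = block / s_hashsize;	// which extent block in the chain
 *	off  = block % s_hashsize;	// slot within that extent block
 *	phys = be32_to_cpu(AFFS_BLOCK(sb, ext_bh, off));
 */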
static int affs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, affs_get_block, wbc);
}

static int affs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, affs_get_block);
}

static void affs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		affs_truncate(inode);
	}
}

static ssize_t
affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	loff_t offset = iocb->ki_pos;
	ssize_t ret;

	if (iov_iter_rw(iter) == WRITE) {
		loff_t size = offset + count;

		if (AFFS_I(inode)->mmu_private < size)
			return 0;
	}

	ret = blockdev_direct_IO(iocb, inode, iter, affs_get_block);
	if (ret < 0 && iov_iter_rw(iter) == WRITE)
		affs_write_failed(mapping, offset + count);
	return ret;
}

static int affs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
			       affs_get_block,
			       &AFFS_I(mapping->host)->mmu_private);
	if (unlikely(ret))
		affs_write_failed(mapping, pos + len);

	return ret;
}

static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, affs_get_block);
}

const struct address_space_operations affs_aops = {
	.readpage	= affs_readpage,
	.writepage	= affs_writepage,
	.write_begin	= affs_write_begin,
	.write_end	= generic_write_end,
	.direct_IO	= affs_direct_IO,
	.bmap		= _affs_bmap
};

static inline struct buffer_head *
affs_bread_ino(struct inode *inode, int block, int create)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, create);
	if (!err) {
		bh = affs_bread(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getzeroblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getzeroblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}

static inline struct buffer_head *
affs_getemptyblk_ino(struct inode *inode, int block)
{
	struct buffer_head *bh, tmp_bh;
	int err;

	tmp_bh.b_state = 0;
	err = affs_get_block(inode, block, &tmp_bh, 1);
	if (!err) {
		bh = affs_getemptyblk(inode->i_sb, tmp_bh.b_blocknr);
		if (bh) {
			bh->b_state |= tmp_bh.b_state;
			return bh;
		}
		err = -EIO;
	}
	return ERR_PTR(err);
}
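/*
 * The routines below implement the OFS (Old File System) variants of the
 * page operations.  On OFS, every data block starts with its own header
 * (type, owner key, sequence number, size and a pointer to the next data
 * block - see the AFFS_DATA_HEAD() stores below), so file data on disk is
 * not page-aligned and the generic block_read_full_page()/cont_write_begin()
 * helpers cannot be used.  Instead we copy between the page and
 * AFFS_DATA(bh) of each block by hand.
 */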
static int
affs_do_readpage_ofs(struct page *page, unsigned to, int create)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	char *data;
	unsigned pos = 0;
	u32 bidx, boff, bsize;
	u32 tmp;

	pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
		 page->index, to);
	BUG_ON(to > PAGE_SIZE);
	bsize = AFFS_SB(sb)->s_data_blksize;
	tmp = page->index << PAGE_SHIFT;
	bidx = tmp / bsize;
	boff = tmp % bsize;

	while (pos < to) {
		bh = affs_bread_ino(inode, bidx, create);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, to - pos);
		BUG_ON(pos + tmp > to || tmp > bsize);
		data = kmap_atomic(page);
		memcpy(data + pos, AFFS_DATA(bh) + boff, tmp);
		kunmap_atomic(data);
		affs_brelse(bh);
		bidx++;
		pos += tmp;
		boff = 0;
	}
	flush_dcache_page(page);
	return 0;
}

static int
affs_extent_file_ofs(struct inode *inode, u32 newsize)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	u32 bidx, boff;
	u32 size, bsize;
	u32 tmp;

	pr_debug("%s(%lu, %d)\n", __func__, inode->i_ino, newsize);
	bsize = AFFS_SB(sb)->s_data_blksize;
	bh = NULL;
	size = AFFS_I(inode)->mmu_private;
	bidx = size / bsize;
	boff = size % bsize;
	if (boff) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		tmp = min(bsize - boff, newsize - size);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memset(AFFS_DATA(bh) + boff, 0, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		size += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
	}

	while (size < newsize) {
		prev_bh = bh;
		bh = affs_getzeroblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto out;
		tmp = min(bsize, newsize - size);
		BUG_ON(tmp > bsize);
		AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
		AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
		AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
		AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_fix_checksum(sb, bh);
		bh->b_state &= ~(1UL << BH_New);
		mark_buffer_dirty_inode(bh, inode);
		if (prev_bh) {
			u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

			if (tmp_next)
				affs_warning(sb, "extent_file_ofs",
					     "next block already set for %d (%d)",
					     bidx, tmp_next);
			AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
			affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
			mark_buffer_dirty_inode(prev_bh, inode);
			affs_brelse(prev_bh);
		}
		size += bsize;
		bidx++;
	}
	affs_brelse(bh);
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return 0;

out:
	inode->i_size = AFFS_I(inode)->mmu_private = newsize;
	return PTR_ERR(bh);
}
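/*
 * Note on the two sizes updated above (an invariant inferred from this
 * file): AFFS_I(inode)->mmu_private tracks how far data blocks have
 * actually been allocated and initialized on disk, while inode->i_size is
 * the logical file size.  affs_write_begin_ofs() below extends mmu_private
 * before a write beyond it, and affs_truncate() compares the two to decide
 * whether anything must be cut off or filled in.
 */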
static int
affs_readpage_ofs(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	u32 to;
	int err;

	pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index);
	to = PAGE_SIZE;
	if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) {
		to = inode->i_size & ~PAGE_MASK;
		memset(page_address(page) + to, 0, PAGE_SIZE - to);
	}

	err = affs_do_readpage_ofs(page, to, 0);
	if (!err)
		SetPageUptodate(page);
	unlock_page(page);
	return err;
}

static int affs_write_begin_ofs(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct page *page;
	pgoff_t index;
	int err = 0;

	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
		 pos + len);
	if (pos > AFFS_I(inode)->mmu_private) {
		/* XXX: this probably leaves a too-big i_size in case of
		 * failure. Should really be updating i_size at write_end time
		 */
		err = affs_extent_file_ofs(inode, pos);
		if (err)
			return err;
	}

	index = pos >> PAGE_SHIFT;
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	if (PageUptodate(page))
		return 0;

	/* XXX: inefficient but safe in the face of short writes */
	err = affs_do_readpage_ofs(page, PAGE_SIZE, 1);
	if (err) {
		unlock_page(page);
		put_page(page);
	}
	return err;
}
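/*
 * Commit a write on OFS.  The page is uptodate after write_begin, so the
 * copy below proceeds in three steps (mirroring the code's structure):
 * finish a partially filled block, copy whole blocks, then copy a trailing
 * partial block, linking each new block into the previous block's "next"
 * chain and fixing checksums as it goes.
 */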
static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
			      loff_t pos, unsigned len, unsigned copied,
			      struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh, *prev_bh;
	char *data;
	u32 bidx, boff, bsize;
	unsigned from, to;
	u32 tmp;
	int written;

	from = pos & (PAGE_SIZE - 1);
	to = from + len;
	/*
	 * XXX: not sure if this can handle short copies (len < copied), but
	 * we don't have to, because the page should always be uptodate here,
	 * due to write_begin.
	 */

	pr_debug("%s(%lu, %llu, %llu)\n", __func__, inode->i_ino, pos,
		 pos + len);
	bsize = AFFS_SB(sb)->s_data_blksize;
	data = page_address(page);

	bh = NULL;
	written = 0;
	tmp = (page->index << PAGE_SHIFT) + from;
	bidx = tmp / bsize;
	boff = tmp % bsize;
	if (boff) {
		bh = affs_bread_ino(inode, bidx, 0);
		if (IS_ERR(bh)) {
			written = PTR_ERR(bh);
			goto err_first_bh;
		}
		tmp = min(bsize - boff, to - from);
		BUG_ON(boff + tmp > bsize || tmp > bsize);
		memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
		be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	} else if (bidx) {
		bh = affs_bread_ino(inode, bidx - 1, 0);
		if (IS_ERR(bh)) {
			written = PTR_ERR(bh);
			goto err_first_bh;
		}
	}
	while (from + bsize <= to) {
		prev_bh = bh;
		bh = affs_getemptyblk_ino(inode, bidx);
		if (IS_ERR(bh))
			goto err_bh;
		memcpy(AFFS_DATA(bh), data + from, bsize);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

				if (tmp_next)
					affs_warning(sb, "commit_write_ofs",
						     "next block already set for %d (%d)",
						     bidx, tmp_next);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		}
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += bsize;
		from += bsize;
		bidx++;
	}
	if (from < to) {
		prev_bh = bh;
		bh = affs_bread_ino(inode, bidx, 1);
		if (IS_ERR(bh))
			goto err_bh;
		tmp = min(bsize, to - from);
		BUG_ON(tmp > bsize);
		memcpy(AFFS_DATA(bh), data + from, tmp);
		if (buffer_new(bh)) {
			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
			AFFS_DATA_HEAD(bh)->next = 0;
			bh->b_state &= ~(1UL << BH_New);
			if (prev_bh) {
				u32 tmp_next = be32_to_cpu(AFFS_DATA_HEAD(prev_bh)->next);

				if (tmp_next)
					affs_warning(sb, "commit_write_ofs",
						     "next block already set for %d (%d)",
						     bidx, tmp_next);
				AFFS_DATA_HEAD(prev_bh)->next = cpu_to_be32(bh->b_blocknr);
				affs_adjust_checksum(prev_bh, bh->b_blocknr - tmp_next);
				mark_buffer_dirty_inode(prev_bh, inode);
			}
		} else if (be32_to_cpu(AFFS_DATA_HEAD(bh)->size) < tmp)
			AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp);
		affs_brelse(prev_bh);
		affs_fix_checksum(sb, bh);
		mark_buffer_dirty_inode(bh, inode);
		written += tmp;
		from += tmp;
		bidx++;
	}
	SetPageUptodate(page);

done:
	affs_brelse(bh);
	tmp = (page->index << PAGE_SHIFT) + from;
	if (tmp > inode->i_size)
		inode->i_size = AFFS_I(inode)->mmu_private = tmp;

err_first_bh:
	unlock_page(page);
	put_page(page);

	return written;

err_bh:
	/* take the error from the failed buffer before bh is replaced by
	 * prev_bh for the release in "done" */
	if (!written)
		written = PTR_ERR(bh);
	bh = prev_bh;
	goto done;
}

const struct address_space_operations affs_aops_ofs = {
	.readpage	= affs_readpage_ofs,
	//.writepage	= affs_writepage_ofs,
	.write_begin	= affs_write_begin_ofs,
	.write_end	= affs_write_end_ofs
};

/* Free any preallocated blocks. */

void
affs_free_prealloc(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	pr_debug("free_prealloc(ino=%lu)\n", inode->i_ino);

	while (AFFS_I(inode)->i_pa_cnt) {
		AFFS_I(inode)->i_pa_cnt--;
		affs_free_block(sb, ++AFFS_I(inode)->i_lastalloc);
	}
}
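/*
 * Truncate (or enlarge) a file to the requested size.  Roughly, as the
 * code below proceeds: growing is delegated to write_begin/write_end at
 * the new EOF; shrinking prunes the extent caches, clears the block table
 * of the last remaining extent block, frees all blocks referenced by the
 * following extent blocks together with those extent blocks themselves,
 * and finally drops any preallocated blocks.
 */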
void
affs_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 ext, ext_key;
	u32 last_blk, blkcnt, blk;
	u32 size;
	struct buffer_head *ext_bh;
	int i;

	pr_debug("truncate(inode=%lu, oldsize=%llu, newsize=%llu)\n",
		 inode->i_ino, AFFS_I(inode)->mmu_private, inode->i_size);

	last_blk = 0;
	ext = 0;
	if (inode->i_size) {
		last_blk = ((u32)inode->i_size - 1) / AFFS_SB(sb)->s_data_blksize;
		ext = last_blk / AFFS_SB(sb)->s_hashsize;
	}

	if (inode->i_size > AFFS_I(inode)->mmu_private) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata;
		loff_t isize = inode->i_size;
		int res;

		res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, 0, &page, &fsdata);
		if (!res)
			res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata);
		else
			inode->i_size = AFFS_I(inode)->mmu_private;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == AFFS_I(inode)->mmu_private)
		return;

	// lock cache
	ext_bh = affs_get_extblock(inode, ext);
	if (IS_ERR(ext_bh)) {
		affs_warning(sb, "truncate",
			     "unexpected read error for ext block %u (%ld)",
			     ext, PTR_ERR(ext_bh));
		return;
	}
	if (AFFS_I(inode)->i_lc) {
		/* clear linear cache */
		i = (ext + 1) >> AFFS_I(inode)->i_lc_shift;
		if (AFFS_I(inode)->i_lc_size > i) {
			AFFS_I(inode)->i_lc_size = i;
			for (; i < AFFS_LC_SIZE; i++)
				AFFS_I(inode)->i_lc[i] = 0;
		}
		/* clear associative cache */
		for (i = 0; i < AFFS_AC_SIZE; i++)
			if (AFFS_I(inode)->i_ac[i].ext >= ext)
				AFFS_I(inode)->i_ac[i].ext = 0;
	}
	ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);

	blkcnt = AFFS_I(inode)->i_blkcnt;
	i = 0;
	blk = last_blk;
	if (inode->i_size) {
		i = last_blk % AFFS_SB(sb)->s_hashsize + 1;
		blk++;
	} else
		AFFS_HEAD(ext_bh)->first_data = 0;
	AFFS_HEAD(ext_bh)->block_count = cpu_to_be32(i);
	size = AFFS_SB(sb)->s_hashsize;
	if (size > blkcnt - blk + i)
		size = blkcnt - blk + i;
	for (; i < size; i++, blk++) {
		affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		AFFS_BLOCK(sb, ext_bh, i) = 0;
	}
	AFFS_TAIL(sb, ext_bh)->extension = 0;
	affs_fix_checksum(sb, ext_bh);
	mark_buffer_dirty_inode(ext_bh, inode);
	affs_brelse(ext_bh);

	if (inode->i_size) {
		AFFS_I(inode)->i_blkcnt = last_blk + 1;
		AFFS_I(inode)->i_extcnt = ext + 1;
		if (affs_test_opt(AFFS_SB(sb)->s_flags, SF_OFS)) {
			struct buffer_head *bh = affs_bread_ino(inode, last_blk, 0);
			u32 tmp;
			if (IS_ERR(bh)) {
				affs_warning(sb, "truncate",
					     "unexpected read error for last block %u (%ld)",
					     ext, PTR_ERR(bh));
				return;
			}
			tmp = be32_to_cpu(AFFS_DATA_HEAD(bh)->next);
			AFFS_DATA_HEAD(bh)->next = 0;
			affs_adjust_checksum(bh, -tmp);
			affs_brelse(bh);
		}
	} else {
		AFFS_I(inode)->i_blkcnt = 0;
		AFFS_I(inode)->i_extcnt = 1;
	}
	AFFS_I(inode)->mmu_private = inode->i_size;
	// unlock cache

	while (ext_key) {
		ext_bh = affs_bread(sb, ext_key);
		size = AFFS_SB(sb)->s_hashsize;
		if (size > blkcnt - blk)
			size = blkcnt - blk;
		for (i = 0; i < size; i++, blk++)
			affs_free_block(sb, be32_to_cpu(AFFS_BLOCK(sb, ext_bh, i)));
		affs_free_block(sb, ext_key);
		ext_key = be32_to_cpu(AFFS_TAIL(sb, ext_bh)->extension);
		affs_brelse(ext_bh);
	}
	affs_free_prealloc(inode);
}

int affs_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	int ret, err;

	err = file_write_and_wait_range(filp, start, end);
	if (err)
		return err;

	inode_lock(inode);
	ret = write_inode_now(inode, 0);
	err = sync_blockdev(inode->i_sb->s_bdev);
	if (!ret)
		ret = err;
	inode_unlock(inode);
	return ret;
}

const struct file_operations affs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.open		= affs_file_open,
	.release	= affs_file_release,
	.fsync		= affs_file_fsync,
	.splice_read	= generic_file_splice_read,
};

const struct inode_operations affs_file_inode_operations = {
	.setattr	= affs_notify_change,
};