/*
 *  linux/fs/hfsplus/extents.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of Extents both in catalog and extents overflow trees
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Compare two extents keys, returns 0 on same, pos/neg for difference */
int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
			const hfsplus_btree_key *k2)
{
	__be32 k1id, k2id;
	__be32 k1s, k2s;

	k1id = k1->ext.cnid;
	k2id = k2->ext.cnid;
	if (k1id != k2id)
		return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1;

	if (k1->ext.fork_type != k2->ext.fork_type)
		return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;

	k1s = k1->ext.start_block;
	k2s = k2->ext.start_block;
	if (k1s == k2s)
		return 0;
	return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1;
}

static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
				  u32 block, u8 type)
{
	key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
	key->ext.cnid = cpu_to_be32(cnid);
	key->ext.start_block = cpu_to_be32(block);
	key->ext.fork_type = type;
	key->ext.pad = 0;
}

static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off)
{
	int i;
	u32 count;

	for (i = 0; i < 8; ext++, i++) {
		count = be32_to_cpu(ext->block_count);
		if (off < count)
			return be32_to_cpu(ext->start_block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}

static int hfsplus_ext_block_count(struct hfsplus_extent *ext)
{
	int i;
	u32 count = 0;

	for (i = 0; i < 8; ext++, i++)
		count += be32_to_cpu(ext->block_count);
	return count;
}

static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
{
	int i;

	ext += 7;
	for (i = 0; i < 7; ext--, i++)
		if (ext->block_count)
			break;
	return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
}

static void __hfsplus_ext_write_extent(struct inode *inode,
				       struct hfs_find_data *fd)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start,
			      HFSPLUS_IS_RSRC(inode) ?
				HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);

	res = hfs_brec_find(fd);
	if (hip->extent_state & HFSPLUS_EXT_NEW) {
		if (res != -ENOENT)
			return;
		hfs_brec_insert(fd, hip->cached_extents,
				sizeof(hfsplus_extent_rec));
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	} else {
		if (res)
			return;
		hfs_bnode_write(fd->bnode, hip->cached_extents,
				fd->entryoffset, fd->entrylength);
		hip->extent_state &= ~HFSPLUS_EXT_DIRTY;
	}

	/*
	 * We can't just use hfsplus_mark_inode_dirty here, because we
	 * also get called from hfsplus_write_inode, which should not
	 * redirty the inode.  Instead the callers have to be careful
	 * to explicitly mark the inode dirty, too.
	 */
	set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);
}
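
/*
 * Write back the cached extent record if it is dirty.  The caller
 * must hold hip->extents_lock.
 */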
static int hfsplus_ext_write_extent_locked(struct inode *inode)
{
	int res;

	if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
		struct hfs_find_data fd;

		res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
		if (res)
			return res;
		__hfsplus_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
	return 0;
}

int hfsplus_ext_write_extent(struct inode *inode)
{
	int res;

	mutex_lock(&HFSPLUS_I(inode)->extents_lock);
	res = hfsplus_ext_write_extent_locked(inode);
	mutex_unlock(&HFSPLUS_I(inode)->extents_lock);

	return res;
}

static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
					    struct hfsplus_extent *extent,
					    u32 cnid, u32 block, u8 type)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.cnid = 0;
	res = hfs_brec_find(fd);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
	    fd->key->ext.fork_type != fd->search_key->ext.fork_type)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfsplus_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset,
		       sizeof(hfsplus_extent_rec));
	return 0;
}
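
/*
 * Read the extent record covering @block into the in-core cache,
 * writing back a dirty cached record first.  The caller must hold
 * hip->extents_lock and pass a find_data initialized on the extents
 * tree.
 */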
static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd,
					     struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	if (hip->extent_state & HFSPLUS_EXT_DIRTY)
		__hfsplus_ext_write_extent(inode, fd);

	res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
					block, HFSPLUS_IS_RSRC(inode) ?
						HFSPLUS_TYPE_RSRC :
						HFSPLUS_TYPE_DATA);
	if (!res) {
		hip->cached_start = be32_to_cpu(fd->key->ext.start_block);
		hip->cached_blocks =
			hfsplus_ext_block_count(hip->cached_extents);
	} else {
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	}
	return res;
}
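
/*
 * Make sure the extent record covering @block is cached, looking it up
 * in the extents tree if the currently cached record does not cover it.
 */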
static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	int res;

	if (block >= hip->cached_start &&
	    block < hip->cached_start + hip->cached_blocks)
		return 0;

	res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
	if (!res) {
		res = __hfsplus_ext_cache_extent(&fd, inode, block);
		hfs_find_exit(&fd);
	}
	return res;
}

/* Get a block at iblock for inode, possibly allocating if create */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res = -EIO;
	u32 ablock, dblock, mask;
	sector_t sector;
	int was_dirty = 0;
	int shift;

	/* Convert inode block to disk allocation block */
	shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
	ablock = iblock >> sbi->fs_shift;

	if (iblock >= hip->fs_blocks) {
		if (iblock > hip->fs_blocks || !create)
			return -EIO;
		if (ablock >= hip->alloc_blocks) {
			res = hfsplus_file_extend(inode);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < hip->first_blocks) {
		dblock = hfsplus_ext_find_block(hip->first_extents, ablock);
		goto done;
	}

	if (inode->i_ino == HFSPLUS_EXT_CNID)
		return -EIO;

	mutex_lock(&hip->extents_lock);

	/*
	 * hfsplus_ext_read_extent will write out a cached extent into
	 * the extents btree.  In that case we may have to mark the inode
	 * dirty even for a pure read of an extent here.
	 */
	was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY);
	res = hfsplus_ext_read_extent(inode, ablock);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		return -EIO;
	}
	dblock = hfsplus_ext_find_block(hip->cached_extents,
					ablock - hip->cached_start);
	mutex_unlock(&hip->extents_lock);

done:
	dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n",
		inode->i_ino, (long long)iblock, dblock);

	mask = (1 << sbi->fs_shift) - 1;
	sector = ((sector_t)dblock << sbi->fs_shift) +
		 sbi->blockoffset + (iblock & mask);
	map_bh(bh_result, sb, sector);

	if (create) {
		set_buffer_new(bh_result);
		hip->phys_size += sb->s_blocksize;
		hip->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
	}
	if (create || was_dirty)
		mark_inode_dirty(inode);
	return 0;
}

static void hfsplus_dump_extent(struct hfsplus_extent *extent)
{
	int i;

	dprint(DBG_EXTENT, "   ");
	for (i = 0; i < 8; i++)
		dprint(DBG_EXTENT, " %u:%u",
			be32_to_cpu(extent[i].start_block),
			be32_to_cpu(extent[i].block_count));
	dprint(DBG_EXTENT, "\n");
}
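
/*
 * Append a run of @block_count blocks starting at @alloc_block to an
 * extent record, @offset blocks into the record: the run is merged
 * into the last used descriptor when it is contiguous with it,
 * otherwise it starts a new descriptor.  Returns -ENOSPC once all
 * eight descriptors of the record are in use.
 */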
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
			      u32 alloc_block, u32 block_count)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count) {
			start = be32_to_cpu(extent->start_block);
			if (alloc_block != start + count) {
				if (++i >= 8)
					return -ENOSPC;
				extent++;
				extent->start_block = cpu_to_be32(alloc_block);
			} else
				block_count += count;
			extent->block_count = cpu_to_be32(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}

static int hfsplus_free_extents(struct super_block *sb,
				struct hfsplus_extent *extent,
				u32 offset, u32 block_nr)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	for (;;) {
		start = be32_to_cpu(extent->start_block);
		if (count <= block_nr) {
			hfsplus_block_free(sb, start, count);
			extent->block_count = 0;
			extent->start_block = 0;
			block_nr -= count;
		} else {
			count -= block_nr;
			hfsplus_block_free(sb, start + count, block_nr);
			extent->block_count = cpu_to_be32(count);
			block_nr = 0;
		}
		if (!block_nr || !i)
			return 0;
		i--;
		extent--;
		count = be32_to_cpu(extent->block_count);
	}
}

/*
 * Free all allocation blocks of a fork: first the extents stored in
 * the catalog record, then any overflow records in the extents tree.
 */
int hfsplus_free_fork(struct super_block *sb, u32 cnid,
		      struct hfsplus_fork_raw *fork, int type)
{
	struct hfs_find_data fd;
	hfsplus_extent_rec ext_entry;
	u32 total_blocks, blocks, start;
	int res, i;

	total_blocks = be32_to_cpu(fork->total_blocks);
	if (!total_blocks)
		return 0;

	blocks = 0;
	for (i = 0; i < 8; i++)
		blocks += be32_to_cpu(fork->extents[i].block_count);

	res = hfsplus_free_extents(sb, fork->extents, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	if (res)
		return res;
	do {
		res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
						total_blocks, type);
		if (res)
			break;
		start = be32_to_cpu(fd.key->ext.start_block);
		hfsplus_free_extents(sb, ext_entry,
				     total_blocks - start,
				     total_blocks);
		hfs_brec_remove(&fd);
		total_blocks = start;
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}

/*
 * Allocate one more clump of blocks for the fork, preferably
 * contiguous with its current last block, and record it either in the
 * in-inode extent record or in the cached overflow record.
 */
int hfsplus_file_extend(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 start, len, goal;
	int res;

	if (sbi->alloc_file->i_size * 8 <
	    sbi->total_blocks - sbi->free_blocks + 8) {
		/* extend alloc file */
		printk(KERN_ERR "hfs: extend alloc file! "
				"(%llu,%u,%u)\n",
			sbi->alloc_file->i_size * 8,
			sbi->total_blocks, sbi->free_blocks);
		return -ENOSPC;
	}

	mutex_lock(&hip->extents_lock);
	if (hip->alloc_blocks == hip->first_blocks)
		goal = hfsplus_ext_lastblock(hip->first_extents);
	else {
		res = hfsplus_ext_read_extent(inode, hip->alloc_blocks);
		if (res)
			goto out;
		goal = hfsplus_ext_lastblock(hip->cached_extents);
	}

	len = hip->clump_blocks;
	start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
	if (start >= sbi->total_blocks) {
		start = hfsplus_block_allocate(sb, goal, 0, &len);
		if (start >= goal) {
			res = -ENOSPC;
			goto out;
		}
	}

	dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);

	if (hip->alloc_blocks <= hip->first_blocks) {
		if (!hip->first_blocks) {
			dprint(DBG_EXTENT, "first extents\n");
			/* no extents yet */
			hip->first_extents[0].start_block = cpu_to_be32(start);
			hip->first_extents[0].block_count = cpu_to_be32(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfsplus_add_extent(hip->first_extents,
						 hip->alloc_blocks,
						 start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks += len;
		}
	} else {
		res = hfsplus_add_extent(hip->cached_extents,
					 hip->alloc_blocks - hip->cached_start,
					 start, len);
		if (!res) {
			hfsplus_dump_extent(hip->cached_extents);
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			hip->cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	mutex_unlock(&hip->extents_lock);
	if (!res) {
		hip->alloc_blocks += len;
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
	}
	return res;

insert_extent:
	dprint(DBG_EXTENT, "insert new extent\n");
	res = hfsplus_ext_write_extent_locked(inode);
	if (res)
		goto out;

	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->cached_extents[0].start_block = cpu_to_be32(start);
	hip->cached_extents[0].block_count = cpu_to_be32(len);
	hfsplus_dump_extent(hip->cached_extents);
	hip->extent_state |= HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW;
	hip->cached_start = hip->alloc_blocks;
	hip->cached_blocks = len;

	res = 0;
	goto out;
}

/*
 * Adjust the on-disk allocation to match i_size.  Growing is done with
 * a zero-length write at the new size, so the normal write path
 * allocates and zeroes the new tail; shrinking walks the extent
 * records back from the end of the fork and frees the surplus
 * allocation blocks.
 */
void hfsplus_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	u32 alloc_cnt, blk_cnt, start;
	int res;

	dprint(DBG_INODE, "truncate: %lu, %llu -> %llu\n",
		inode->i_ino, (long long)hip->phys_size,
		inode->i_size);

	if (inode->i_size > hip->phys_size) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata;
		/* i_size may not fit in 32 bits, keep the full loff_t */
		loff_t size = inode->i_size;

		res = pagecache_write_begin(NULL, mapping, size, 0,
					    AOP_FLAG_UNINTERRUPTIBLE,
					    &page, &fsdata);
		if (res)
			return;
		res = pagecache_write_end(NULL, mapping, size,
					  0, 0, page, fsdata);
		if (res < 0)
			return;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == hip->phys_size)
		return;

	blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >>
			HFSPLUS_SB(sb)->alloc_blksz_shift;
	alloc_cnt = hip->alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out;

	mutex_lock(&hip->extents_lock);
	res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		/* XXX: We lack error handling of hfsplus_file_truncate() */
		return;
	}
	while (1) {
		if (alloc_cnt == hip->first_blocks) {
			hfsplus_free_extents(sb, hip->first_extents,
					     alloc_cnt, alloc_cnt - blk_cnt);
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks = blk_cnt;
			break;
		}
		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		start = hip->cached_start;
		hfsplus_free_extents(sb, hip->cached_extents,
				     alloc_cnt - start, alloc_cnt - blk_cnt);
		hfsplus_dump_extent(hip->cached_extents);
		if (blk_cnt > start) {
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			break;
		}
		alloc_cnt = start;
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
		hfs_brec_remove(&fd);
	}
	hfs_find_exit(&fd);
	mutex_unlock(&hip->extents_lock);

	hip->alloc_blocks = blk_cnt;
out:
	hip->phys_size = inode->i_size;
	hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >>
			sb->s_blocksize_bits;
	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
	hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
}