/*
 * linux/fs/hfsplus/extents.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of Extents both in catalog and extents overflow trees
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Compare two extents keys, returns 0 on same, pos/neg for difference */
int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
			const hfsplus_btree_key *k2)
{
	__be32 k1id, k2id;
	__be32 k1s, k2s;

	k1id = k1->ext.cnid;
	k2id = k2->ext.cnid;
	if (k1id != k2id)
		return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1;

	if (k1->ext.fork_type != k2->ext.fork_type)
		return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;

	k1s = k1->ext.start_block;
	k2s = k2->ext.start_block;
	if (k1s == k2s)
		return 0;
	return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1;
}

static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
				  u32 block, u8 type)
{
	key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
	key->ext.cnid = cpu_to_be32(cnid);
	key->ext.start_block = cpu_to_be32(block);
	key->ext.fork_type = type;
	key->ext.pad = 0;
}

/* Map a block offset within an extent record to an allocation block number */
static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off)
{
	int i;
	u32 count;

	for (i = 0; i < 8; ext++, i++) {
		count = be32_to_cpu(ext->block_count);
		if (off < count)
			return be32_to_cpu(ext->start_block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}

/* Total number of allocation blocks covered by an extent record */
static int hfsplus_ext_block_count(struct hfsplus_extent *ext)
{
	int i;
	u32 count = 0;

	for (i = 0; i < 8; ext++, i++)
		count += be32_to_cpu(ext->block_count);
	return count;
}

/* Allocation block just past the last block used in an extent record */
static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
{
	int i;

	ext += 7;
	for (i = 0; i < 7; ext--, i++)
		if (ext->block_count)
			break;
	return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
}

static void __hfsplus_ext_write_extent(struct inode *inode,
				       struct hfs_find_data *fd)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start,
			      HFSPLUS_IS_RSRC(inode) ?
				HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);

	res = hfs_brec_find(fd);
	if (hip->extent_state & HFSPLUS_EXT_NEW) {
		if (res != -ENOENT)
			return;
		hfs_brec_insert(fd, hip->cached_extents,
				sizeof(hfsplus_extent_rec));
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	} else {
		if (res)
			return;
		hfs_bnode_write(fd->bnode, hip->cached_extents,
				fd->entryoffset, fd->entrylength);
		hip->extent_state &= ~HFSPLUS_EXT_DIRTY;
	}

	/*
	 * We can't just use hfsplus_mark_inode_dirty here, because we
	 * also get called from hfsplus_write_inode, which should not
	 * redirty the inode. Instead the callers have to be careful
	 * to explicitly mark the inode dirty, too.
	 */
	set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);
}

static void hfsplus_ext_write_extent_locked(struct inode *inode)
{
	if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
		struct hfs_find_data fd;

		hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
		__hfsplus_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
}

void hfsplus_ext_write_extent(struct inode *inode)
{
	mutex_lock(&HFSPLUS_I(inode)->extents_lock);
	hfsplus_ext_write_extent_locked(inode);
	mutex_unlock(&HFSPLUS_I(inode)->extents_lock);
}

static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
					    struct hfsplus_extent *extent,
					    u32 cnid, u32 block, u8 type)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.cnid = 0;
	res = hfs_brec_find(fd);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
	    fd->key->ext.fork_type != fd->search_key->ext.fork_type)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfsplus_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset,
		       sizeof(hfsplus_extent_rec));
	return 0;
}

static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd,
					     struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	if (hip->extent_state & HFSPLUS_EXT_DIRTY)
		__hfsplus_ext_write_extent(inode, fd);

	res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
					block, HFSPLUS_IS_RSRC(inode) ?
						HFSPLUS_TYPE_RSRC :
						HFSPLUS_TYPE_DATA);
	if (!res) {
		hip->cached_start = be32_to_cpu(fd->key->ext.start_block);
		hip->cached_blocks =
			hfsplus_ext_block_count(hip->cached_extents);
	} else {
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	}
	return res;
}

static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	int res;

	if (block >= hip->cached_start &&
	    block < hip->cached_start + hip->cached_blocks)
		return 0;

	hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
	res = __hfsplus_ext_cache_extent(&fd, inode, block);
	hfs_find_exit(&fd);
	return res;
}

/* Get a block at iblock for inode, possibly allocating if create */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res = -EIO;
	u32 ablock, dblock, mask;
	sector_t sector;
	int was_dirty = 0;
	int shift;

	/* Convert inode block to disk allocation block */
	shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
	ablock = iblock >> sbi->fs_shift;

	if (iblock >= hip->fs_blocks) {
		if (iblock > hip->fs_blocks || !create)
			return -EIO;
		if (ablock >= hip->alloc_blocks) {
			res = hfsplus_file_extend(inode);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < hip->first_blocks) {
		dblock = hfsplus_ext_find_block(hip->first_extents, ablock);
		goto done;
	}

	if (inode->i_ino == HFSPLUS_EXT_CNID)
		return -EIO;

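	/*
	 * The block is not covered by the extents stored in the inode
	 * itself, so look it up in the extents overflow tree under
	 * extents_lock.
	 */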
	mutex_lock(&hip->extents_lock);

	/*
	 * hfsplus_ext_read_extent will write out a cached extent into
	 * the extents btree. In that case we may have to mark the inode
	 * dirty even for a pure read of an extent here.
	 */
	was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY);
	res = hfsplus_ext_read_extent(inode, ablock);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		return -EIO;
	}
	dblock = hfsplus_ext_find_block(hip->cached_extents,
					ablock - hip->cached_start);
	mutex_unlock(&hip->extents_lock);

done:
	dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n",
		inode->i_ino, (long long)iblock, dblock);

	mask = (1 << sbi->fs_shift) - 1;
	sector = ((sector_t)dblock << sbi->fs_shift) +
		 sbi->blockoffset + (iblock & mask);
	map_bh(bh_result, sb, sector);

	if (create) {
		set_buffer_new(bh_result);
		hip->phys_size += sb->s_blocksize;
		hip->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
	}
	if (create || was_dirty)
		mark_inode_dirty(inode);
	return 0;
}

static void hfsplus_dump_extent(struct hfsplus_extent *extent)
{
	int i;

	dprint(DBG_EXTENT, " ");
	for (i = 0; i < 8; i++)
		dprint(DBG_EXTENT, " %u:%u",
			be32_to_cpu(extent[i].start_block),
			be32_to_cpu(extent[i].block_count));
	dprint(DBG_EXTENT, "\n");
}

/* Append a newly allocated run of blocks to an extent record */
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
			      u32 alloc_block, u32 block_count)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count) {
			start = be32_to_cpu(extent->start_block);
			if (alloc_block != start + count) {
				if (++i >= 8)
					return -ENOSPC;
				extent++;
				extent->start_block = cpu_to_be32(alloc_block);
			} else
				block_count += count;
			extent->block_count = cpu_to_be32(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}

/* Free block_nr blocks working backwards from offset within the record */
static int hfsplus_free_extents(struct super_block *sb,
				struct hfsplus_extent *extent,
				u32 offset, u32 block_nr)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	for (;;) {
		start = be32_to_cpu(extent->start_block);
		if (count <= block_nr) {
			hfsplus_block_free(sb, start, count);
			extent->block_count = 0;
			extent->start_block = 0;
			block_nr -= count;
		} else {
			count -= block_nr;
			hfsplus_block_free(sb, start + count, block_nr);
			extent->block_count = cpu_to_be32(count);
			block_nr = 0;
		}
		if (!block_nr || !i)
			return 0;
		i--;
		extent--;
		count = be32_to_cpu(extent->block_count);
	}
}

/* Free all allocation blocks belonging to one fork of the file with cnid */
int hfsplus_free_fork(struct super_block *sb, u32 cnid,
		      struct hfsplus_fork_raw *fork, int type)
{
	struct hfs_find_data fd;
	hfsplus_extent_rec ext_entry;
	u32 total_blocks, blocks, start;
	int res, i;

	total_blocks = be32_to_cpu(fork->total_blocks);
	if (!total_blocks)
		return 0;

	blocks = 0;
	for (i = 0; i < 8; i++)
		blocks += be32_to_cpu(fork->extents[i].block_count);

	res = hfsplus_free_extents(sb, fork->extents, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	do {
		res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
						total_blocks, type);
		if (res)
			break;
		start = be32_to_cpu(fd.key->ext.start_block);
		hfsplus_free_extents(sb, ext_entry,
				     total_blocks - start,
				     total_blocks);
		hfs_brec_remove(&fd);
		total_blocks = start;
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}

int hfsplus_file_extend(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 start, len, goal;
	int res;

	if (sbi->alloc_file->i_size * 8 <
	    sbi->total_blocks - sbi->free_blocks + 8) {
		/* extend alloc file */
		printk(KERN_ERR "hfs: extend alloc file! "
" 407 "(%llu,%u,%u)\n", 408 sbi->alloc_file->i_size * 8, 409 sbi->total_blocks, sbi->free_blocks); 410 return -ENOSPC; 411 } 412 413 mutex_lock(&hip->extents_lock); 414 if (hip->alloc_blocks == hip->first_blocks) 415 goal = hfsplus_ext_lastblock(hip->first_extents); 416 else { 417 res = hfsplus_ext_read_extent(inode, hip->alloc_blocks); 418 if (res) 419 goto out; 420 goal = hfsplus_ext_lastblock(hip->cached_extents); 421 } 422 423 len = hip->clump_blocks; 424 start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len); 425 if (start >= sbi->total_blocks) { 426 start = hfsplus_block_allocate(sb, goal, 0, &len); 427 if (start >= goal) { 428 res = -ENOSPC; 429 goto out; 430 } 431 } 432 433 dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len); 434 435 if (hip->alloc_blocks <= hip->first_blocks) { 436 if (!hip->first_blocks) { 437 dprint(DBG_EXTENT, "first extents\n"); 438 /* no extents yet */ 439 hip->first_extents[0].start_block = cpu_to_be32(start); 440 hip->first_extents[0].block_count = cpu_to_be32(len); 441 res = 0; 442 } else { 443 /* try to append to extents in inode */ 444 res = hfsplus_add_extent(hip->first_extents, 445 hip->alloc_blocks, 446 start, len); 447 if (res == -ENOSPC) 448 goto insert_extent; 449 } 450 if (!res) { 451 hfsplus_dump_extent(hip->first_extents); 452 hip->first_blocks += len; 453 } 454 } else { 455 res = hfsplus_add_extent(hip->cached_extents, 456 hip->alloc_blocks - hip->cached_start, 457 start, len); 458 if (!res) { 459 hfsplus_dump_extent(hip->cached_extents); 460 hip->extent_state |= HFSPLUS_EXT_DIRTY; 461 hip->cached_blocks += len; 462 } else if (res == -ENOSPC) 463 goto insert_extent; 464 } 465 out: 466 mutex_unlock(&hip->extents_lock); 467 if (!res) { 468 hip->alloc_blocks += len; 469 hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY); 470 } 471 return res; 472 473 insert_extent: 474 dprint(DBG_EXTENT, "insert new extent\n"); 475 hfsplus_ext_write_extent_locked(inode); 476 477 memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec)); 478 hip->cached_extents[0].start_block = cpu_to_be32(start); 479 hip->cached_extents[0].block_count = cpu_to_be32(len); 480 hfsplus_dump_extent(hip->cached_extents); 481 hip->extent_state |= HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW; 482 hip->cached_start = hip->alloc_blocks; 483 hip->cached_blocks = len; 484 485 res = 0; 486 goto out; 487 } 488 489 void hfsplus_file_truncate(struct inode *inode) 490 { 491 struct super_block *sb = inode->i_sb; 492 struct hfsplus_inode_info *hip = HFSPLUS_I(inode); 493 struct hfs_find_data fd; 494 u32 alloc_cnt, blk_cnt, start; 495 int res; 496 497 dprint(DBG_INODE, "truncate: %lu, %llu -> %llu\n", 498 inode->i_ino, (long long)hip->phys_size, 499 inode->i_size); 500 501 if (inode->i_size > hip->phys_size) { 502 struct address_space *mapping = inode->i_mapping; 503 struct page *page; 504 void *fsdata; 505 u32 size = inode->i_size; 506 int res; 507 508 res = pagecache_write_begin(NULL, mapping, size, 0, 509 AOP_FLAG_UNINTERRUPTIBLE, 510 &page, &fsdata); 511 if (res) 512 return; 513 res = pagecache_write_end(NULL, mapping, size, 514 0, 0, page, fsdata); 515 if (res < 0) 516 return; 517 mark_inode_dirty(inode); 518 return; 519 } else if (inode->i_size == hip->phys_size) 520 return; 521 522 blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >> 523 HFSPLUS_SB(sb)->alloc_blksz_shift; 524 alloc_cnt = hip->alloc_blocks; 525 if (blk_cnt == alloc_cnt) 526 goto out; 527 528 mutex_lock(&hip->extents_lock); 529 hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd); 530 while (1) { 531 if 
		if (alloc_cnt == hip->first_blocks) {
			hfsplus_free_extents(sb, hip->first_extents,
					     alloc_cnt, alloc_cnt - blk_cnt);
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks = blk_cnt;
			break;
		}
		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		start = hip->cached_start;
		hfsplus_free_extents(sb, hip->cached_extents,
				     alloc_cnt - start, alloc_cnt - blk_cnt);
		hfsplus_dump_extent(hip->cached_extents);
		if (blk_cnt > start) {
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			break;
		}
		alloc_cnt = start;
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
		hfs_brec_remove(&fd);
	}
	hfs_find_exit(&fd);
	mutex_unlock(&hip->extents_lock);

	hip->alloc_blocks = blk_cnt;
out:
	hip->phys_size = inode->i_size;
	hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >>
			sb->s_blocksize_bits;
	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
	hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
}