1 /* 2 * linux/fs/hfsplus/extents.c 3 * 4 * Copyright (C) 2001 5 * Brad Boyer (flar@allandria.com) 6 * (C) 2003 Ardis Technologies <roman@ardistech.com> 7 * 8 * Handling of Extents both in catalog and extents overflow trees 9 */ 10 11 #include <linux/errno.h> 12 #include <linux/fs.h> 13 #include <linux/pagemap.h> 14 15 #include "hfsplus_fs.h" 16 #include "hfsplus_raw.h" 17 18 /* Compare two extents keys, returns 0 on same, pos/neg for difference */ 19 int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1, 20 const hfsplus_btree_key *k2) 21 { 22 __be32 k1id, k2id; 23 __be32 k1s, k2s; 24 25 k1id = k1->ext.cnid; 26 k2id = k2->ext.cnid; 27 if (k1id != k2id) 28 return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1; 29 30 if (k1->ext.fork_type != k2->ext.fork_type) 31 return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1; 32 33 k1s = k1->ext.start_block; 34 k2s = k2->ext.start_block; 35 if (k1s == k2s) 36 return 0; 37 return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1; 38 } 39 40 static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid, 41 u32 block, u8 type) 42 { 43 key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2); 44 key->ext.cnid = cpu_to_be32(cnid); 45 key->ext.start_block = cpu_to_be32(block); 46 key->ext.fork_type = type; 47 key->ext.pad = 0; 48 } 49 50 static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off) 51 { 52 int i; 53 u32 count; 54 55 for (i = 0; i < 8; ext++, i++) { 56 count = be32_to_cpu(ext->block_count); 57 if (off < count) 58 return be32_to_cpu(ext->start_block) + off; 59 off -= count; 60 } 61 /* panic? */ 62 return 0; 63 } 64 65 static int hfsplus_ext_block_count(struct hfsplus_extent *ext) 66 { 67 int i; 68 u32 count = 0; 69 70 for (i = 0; i < 8; ext++, i++) 71 count += be32_to_cpu(ext->block_count); 72 return count; 73 } 74 75 static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext) 76 { 77 int i; 78 79 ext += 7; 80 for (i = 0; i < 7; ext--, i++) 81 if (ext->block_count) 82 break; 83 return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count); 84 } 85 86 static void __hfsplus_ext_write_extent(struct inode *inode, 87 struct hfs_find_data *fd) 88 { 89 struct hfsplus_inode_info *hip = HFSPLUS_I(inode); 90 int res; 91 92 WARN_ON(!mutex_is_locked(&hip->extents_lock)); 93 94 hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start, 95 HFSPLUS_IS_RSRC(inode) ? 96 HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA); 97 98 res = hfs_brec_find(fd); 99 if (hip->extent_state & HFSPLUS_EXT_NEW) { 100 if (res != -ENOENT) 101 return; 102 hfs_brec_insert(fd, hip->cached_extents, 103 sizeof(hfsplus_extent_rec)); 104 hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW); 105 } else { 106 if (res) 107 return; 108 hfs_bnode_write(fd->bnode, hip->cached_extents, 109 fd->entryoffset, fd->entrylength); 110 hip->extent_state &= ~HFSPLUS_EXT_DIRTY; 111 } 112 113 /* 114 * We can't just use hfsplus_mark_inode_dirty here, because we 115 * also get called from hfsplus_write_inode, which should not 116 * redirty the inode. Instead the callers have to be careful 117 * to explicily mark the inode dirty, too. 118 */ 119 set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags); 120 } 121 122 static void hfsplus_ext_write_extent_locked(struct inode *inode) 123 { 124 if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) { 125 struct hfs_find_data fd; 126 127 hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd); 128 __hfsplus_ext_write_extent(inode, &fd); 129 hfs_find_exit(&fd); 130 } 131 } 132 133 void hfsplus_ext_write_extent(struct inode *inode) 134 { 135 mutex_lock(&HFSPLUS_I(inode)->extents_lock); 136 hfsplus_ext_write_extent_locked(inode); 137 mutex_unlock(&HFSPLUS_I(inode)->extents_lock); 138 } 139 140 static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd, 141 struct hfsplus_extent *extent, 142 u32 cnid, u32 block, u8 type) 143 { 144 int res; 145 146 hfsplus_ext_build_key(fd->search_key, cnid, block, type); 147 fd->key->ext.cnid = 0; 148 res = hfs_brec_find(fd); 149 if (res && res != -ENOENT) 150 return res; 151 if (fd->key->ext.cnid != fd->search_key->ext.cnid || 152 fd->key->ext.fork_type != fd->search_key->ext.fork_type) 153 return -ENOENT; 154 if (fd->entrylength != sizeof(hfsplus_extent_rec)) 155 return -EIO; 156 hfs_bnode_read(fd->bnode, extent, fd->entryoffset, 157 sizeof(hfsplus_extent_rec)); 158 return 0; 159 } 160 161 static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd, 162 struct inode *inode, u32 block) 163 { 164 struct hfsplus_inode_info *hip = HFSPLUS_I(inode); 165 int res; 166 167 WARN_ON(!mutex_is_locked(&hip->extents_lock)); 168 169 if (hip->extent_state & HFSPLUS_EXT_DIRTY) 170 __hfsplus_ext_write_extent(inode, fd); 171 172 res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino, 173 block, HFSPLUS_IS_RSRC(inode) ? 174 HFSPLUS_TYPE_RSRC : 175 HFSPLUS_TYPE_DATA); 176 if (!res) { 177 hip->cached_start = be32_to_cpu(fd->key->ext.start_block); 178 hip->cached_blocks = 179 hfsplus_ext_block_count(hip->cached_extents); 180 } else { 181 hip->cached_start = hip->cached_blocks = 0; 182 hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW); 183 } 184 return res; 185 } 186 187 static int hfsplus_ext_read_extent(struct inode *inode, u32 block) 188 { 189 struct hfsplus_inode_info *hip = HFSPLUS_I(inode); 190 struct hfs_find_data fd; 191 int res; 192 193 if (block >= hip->cached_start && 194 block < hip->cached_start + hip->cached_blocks) 195 return 0; 196 197 hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd); 198 res = __hfsplus_ext_cache_extent(&fd, inode, block); 199 hfs_find_exit(&fd); 200 return res; 201 } 202 203 /* Get a block at iblock for inode, possibly allocating if create */ 204 int hfsplus_get_block(struct inode *inode, sector_t iblock, 205 struct buffer_head *bh_result, int create) 206 { 207 struct super_block *sb = inode->i_sb; 208 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); 209 struct hfsplus_inode_info *hip = HFSPLUS_I(inode); 210 int res = -EIO; 211 u32 ablock, dblock, mask; 212 int was_dirty = 0; 213 int shift; 214 215 /* Convert inode block to disk allocation block */ 216 shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits; 217 ablock = iblock >> sbi->fs_shift; 218 219 if (iblock >= hip->fs_blocks) { 220 if (iblock > hip->fs_blocks || !create) 221 return -EIO; 222 if (ablock >= hip->alloc_blocks) { 223 res = hfsplus_file_extend(inode); 224 if (res) 225 return res; 226 } 227 } else 228 create = 0; 229 230 if (ablock < hip->first_blocks) { 231 dblock = hfsplus_ext_find_block(hip->first_extents, ablock); 232 goto done; 233 } 234 235 if (inode->i_ino == HFSPLUS_EXT_CNID) 236 return -EIO; 237 238 mutex_lock(&hip->extents_lock); 239 240 /* 241 * hfsplus_ext_read_extent will write out a cached extent into 242 * the extents btree. In that case we may have to mark the inode 243 * dirty even for a pure read of an extent here. 244 */ 245 was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY); 246 res = hfsplus_ext_read_extent(inode, ablock); 247 if (res) { 248 mutex_unlock(&hip->extents_lock); 249 return -EIO; 250 } 251 dblock = hfsplus_ext_find_block(hip->cached_extents, 252 ablock - hip->cached_start); 253 mutex_unlock(&hip->extents_lock); 254 255 done: 256 dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n", 257 inode->i_ino, (long long)iblock, dblock); 258 mask = (1 << sbi->fs_shift) - 1; 259 map_bh(bh_result, sb, 260 (dblock << sbi->fs_shift) + sbi->blockoffset + 261 (iblock & mask)); 262 if (create) { 263 set_buffer_new(bh_result); 264 hip->phys_size += sb->s_blocksize; 265 hip->fs_blocks++; 266 inode_add_bytes(inode, sb->s_blocksize); 267 } 268 if (create || was_dirty) 269 mark_inode_dirty(inode); 270 return 0; 271 } 272 273 static void hfsplus_dump_extent(struct hfsplus_extent *extent) 274 { 275 int i; 276 277 dprint(DBG_EXTENT, " "); 278 for (i = 0; i < 8; i++) 279 dprint(DBG_EXTENT, " %u:%u", be32_to_cpu(extent[i].start_block), 280 be32_to_cpu(extent[i].block_count)); 281 dprint(DBG_EXTENT, "\n"); 282 } 283 284 static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset, 285 u32 alloc_block, u32 block_count) 286 { 287 u32 count, start; 288 int i; 289 290 hfsplus_dump_extent(extent); 291 for (i = 0; i < 8; extent++, i++) { 292 count = be32_to_cpu(extent->block_count); 293 if (offset == count) { 294 start = be32_to_cpu(extent->start_block); 295 if (alloc_block != start + count) { 296 if (++i >= 8) 297 return -ENOSPC; 298 extent++; 299 extent->start_block = cpu_to_be32(alloc_block); 300 } else 301 block_count += count; 302 extent->block_count = cpu_to_be32(block_count); 303 return 0; 304 } else if (offset < count) 305 break; 306 offset -= count; 307 } 308 /* panic? */ 309 return -EIO; 310 } 311 312 static int hfsplus_free_extents(struct super_block *sb, 313 struct hfsplus_extent *extent, 314 u32 offset, u32 block_nr) 315 { 316 u32 count, start; 317 int i; 318 319 hfsplus_dump_extent(extent); 320 for (i = 0; i < 8; extent++, i++) { 321 count = be32_to_cpu(extent->block_count); 322 if (offset == count) 323 goto found; 324 else if (offset < count) 325 break; 326 offset -= count; 327 } 328 /* panic? */ 329 return -EIO; 330 found: 331 for (;;) { 332 start = be32_to_cpu(extent->start_block); 333 if (count <= block_nr) { 334 hfsplus_block_free(sb, start, count); 335 extent->block_count = 0; 336 extent->start_block = 0; 337 block_nr -= count; 338 } else { 339 count -= block_nr; 340 hfsplus_block_free(sb, start + count, block_nr); 341 extent->block_count = cpu_to_be32(count); 342 block_nr = 0; 343 } 344 if (!block_nr || !i) 345 return 0; 346 i--; 347 extent--; 348 count = be32_to_cpu(extent->block_count); 349 } 350 } 351 352 int hfsplus_free_fork(struct super_block *sb, u32 cnid, 353 struct hfsplus_fork_raw *fork, int type) 354 { 355 struct hfs_find_data fd; 356 hfsplus_extent_rec ext_entry; 357 u32 total_blocks, blocks, start; 358 int res, i; 359 360 total_blocks = be32_to_cpu(fork->total_blocks); 361 if (!total_blocks) 362 return 0; 363 364 blocks = 0; 365 for (i = 0; i < 8; i++) 366 blocks += be32_to_cpu(fork->extents[i].block_count); 367 368 res = hfsplus_free_extents(sb, fork->extents, blocks, blocks); 369 if (res) 370 return res; 371 if (total_blocks == blocks) 372 return 0; 373 374 hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd); 375 do { 376 res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid, 377 total_blocks, type); 378 if (res) 379 break; 380 start = be32_to_cpu(fd.key->ext.start_block); 381 hfsplus_free_extents(sb, ext_entry, 382 total_blocks - start, 383 total_blocks); 384 hfs_brec_remove(&fd); 385 total_blocks = start; 386 } while (total_blocks > blocks); 387 hfs_find_exit(&fd); 388 389 return res; 390 } 391 392 int hfsplus_file_extend(struct inode *inode) 393 { 394 struct super_block *sb = inode->i_sb; 395 struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb); 396 struct hfsplus_inode_info *hip = HFSPLUS_I(inode); 397 u32 start, len, goal; 398 int res; 399 400 if (sbi->alloc_file->i_size * 8 < 401 sbi->total_blocks - sbi->free_blocks + 8) { 402 /* extend alloc file */ 403 printk(KERN_ERR "hfs: extend alloc file! " 404 "(%llu,%u,%u)\n", 405 sbi->alloc_file->i_size * 8, 406 sbi->total_blocks, sbi->free_blocks); 407 return -ENOSPC; 408 } 409 410 mutex_lock(&hip->extents_lock); 411 if (hip->alloc_blocks == hip->first_blocks) 412 goal = hfsplus_ext_lastblock(hip->first_extents); 413 else { 414 res = hfsplus_ext_read_extent(inode, hip->alloc_blocks); 415 if (res) 416 goto out; 417 goal = hfsplus_ext_lastblock(hip->cached_extents); 418 } 419 420 len = hip->clump_blocks; 421 start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len); 422 if (start >= sbi->total_blocks) { 423 start = hfsplus_block_allocate(sb, goal, 0, &len); 424 if (start >= goal) { 425 res = -ENOSPC; 426 goto out; 427 } 428 } 429 430 dprint(DBG_EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len); 431 432 if (hip->alloc_blocks <= hip->first_blocks) { 433 if (!hip->first_blocks) { 434 dprint(DBG_EXTENT, "first extents\n"); 435 /* no extents yet */ 436 hip->first_extents[0].start_block = cpu_to_be32(start); 437 hip->first_extents[0].block_count = cpu_to_be32(len); 438 res = 0; 439 } else { 440 /* try to append to extents in inode */ 441 res = hfsplus_add_extent(hip->first_extents, 442 hip->alloc_blocks, 443 start, len); 444 if (res == -ENOSPC) 445 goto insert_extent; 446 } 447 if (!res) { 448 hfsplus_dump_extent(hip->first_extents); 449 hip->first_blocks += len; 450 } 451 } else { 452 res = hfsplus_add_extent(hip->cached_extents, 453 hip->alloc_blocks - hip->cached_start, 454 start, len); 455 if (!res) { 456 hfsplus_dump_extent(hip->cached_extents); 457 hip->extent_state |= HFSPLUS_EXT_DIRTY; 458 hip->cached_blocks += len; 459 } else if (res == -ENOSPC) 460 goto insert_extent; 461 } 462 out: 463 mutex_unlock(&hip->extents_lock); 464 if (!res) { 465 hip->alloc_blocks += len; 466 hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY); 467 } 468 return res; 469 470 insert_extent: 471 dprint(DBG_EXTENT, "insert new extent\n"); 472 hfsplus_ext_write_extent_locked(inode); 473 474 memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec)); 475 hip->cached_extents[0].start_block = cpu_to_be32(start); 476 hip->cached_extents[0].block_count = cpu_to_be32(len); 477 hfsplus_dump_extent(hip->cached_extents); 478 hip->extent_state |= HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW; 479 hip->cached_start = hip->alloc_blocks; 480 hip->cached_blocks = len; 481 482 res = 0; 483 goto out; 484 } 485 486 void hfsplus_file_truncate(struct inode *inode) 487 { 488 struct super_block *sb = inode->i_sb; 489 struct hfsplus_inode_info *hip = HFSPLUS_I(inode); 490 struct hfs_find_data fd; 491 u32 alloc_cnt, blk_cnt, start; 492 int res; 493 494 dprint(DBG_INODE, "truncate: %lu, %llu -> %llu\n", 495 inode->i_ino, (long long)hip->phys_size, 496 inode->i_size); 497 498 if (inode->i_size > hip->phys_size) { 499 struct address_space *mapping = inode->i_mapping; 500 struct page *page; 501 void *fsdata; 502 u32 size = inode->i_size; 503 int res; 504 505 res = pagecache_write_begin(NULL, mapping, size, 0, 506 AOP_FLAG_UNINTERRUPTIBLE, 507 &page, &fsdata); 508 if (res) 509 return; 510 res = pagecache_write_end(NULL, mapping, size, 511 0, 0, page, fsdata); 512 if (res < 0) 513 return; 514 mark_inode_dirty(inode); 515 return; 516 } else if (inode->i_size == hip->phys_size) 517 return; 518 519 blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >> 520 HFSPLUS_SB(sb)->alloc_blksz_shift; 521 alloc_cnt = hip->alloc_blocks; 522 if (blk_cnt == alloc_cnt) 523 goto out; 524 525 mutex_lock(&hip->extents_lock); 526 hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd); 527 while (1) { 528 if (alloc_cnt == hip->first_blocks) { 529 hfsplus_free_extents(sb, hip->first_extents, 530 alloc_cnt, alloc_cnt - blk_cnt); 531 hfsplus_dump_extent(hip->first_extents); 532 hip->first_blocks = blk_cnt; 533 break; 534 } 535 res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt); 536 if (res) 537 break; 538 start = hip->cached_start; 539 hfsplus_free_extents(sb, hip->cached_extents, 540 alloc_cnt - start, alloc_cnt - blk_cnt); 541 hfsplus_dump_extent(hip->cached_extents); 542 if (blk_cnt > start) { 543 hip->extent_state |= HFSPLUS_EXT_DIRTY; 544 break; 545 } 546 alloc_cnt = start; 547 hip->cached_start = hip->cached_blocks = 0; 548 hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW); 549 hfs_brec_remove(&fd); 550 } 551 hfs_find_exit(&fd); 552 mutex_unlock(&hip->extents_lock); 553 554 hip->alloc_blocks = blk_cnt; 555 out: 556 hip->phys_size = inode->i_size; 557 hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> 558 sb->s_blocksize_bits; 559 inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits); 560 hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY); 561 } 562