/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) \
		find_next_one_bit(addr, size, offset)

#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y)
#define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x

static inline int find_next_one_bit(void *addr, int size, int offset)
{
	uintBPL_t *p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
	int result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG - 1);
	if (offset) {
		tmp = leBPL_to_cpup(p++);
		tmp &= ~0UL << offset;
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG - 1)) {
		tmp = leBPL_to_cpup(p++);
		if (tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = leBPL_to_cpup(p);
found_first:
	tmp &= ~0UL >> (BITS_PER_LONG - size);
found_middle:
	return result + ffz(~tmp);
}

#define find_first_one_bit(addr, size)\
	find_next_one_bit((addr), (size), 0)

static int read_block_bitmap(struct super_block *sb,
			     struct udf_bitmap *bitmap, unsigned int block,
			     unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;

	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
	if (!bh)
		retval = -EIO;

	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}

static int __load_block_bitmap(struct super_block *sb,
			       struct udf_bitmap *bitmap,
			       unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups) {
		udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group,
			  nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group]) {
		return block_group;
	} else {
		retval = read_block_bitmap(sb, bitmap, block_group,
					   block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}
}

static inline int load_block_bitmap(struct super_block *sb,
				    struct udf_bitmap *bitmap,
				    unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}
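
/*
 * udf_add_free_space() adjusts the free-space count cached for
 * @partition in the Logical Volume Integrity Descriptor by @cnt
 * (callers pass a negative count to account for allocations). It
 * returns false when no LVID buffer is available, which tells
 * callers whether sbi->s_lvid_bh needs to be marked dirty.
 */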
static bool udf_add_free_space(struct udf_sb_info *sbi,
			       u16 partition, u32 cnt)
{
	struct logicalVolIntegrityDesc *lvid;

	if (sbi->s_lvid_bh == NULL)
		return false;

	lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
	lvid->freeSpaceTable[partition] = cpu_to_le32(le32_to_cpu(
				lvid->freeSpaceTable[partition]) + cnt);
	return true;
}

static void udf_bitmap_free_blocks(struct super_block *sb,
				   struct inode *inode,
				   struct udf_bitmap *bitmap,
				   kernel_lb_addr bloc, uint32_t offset,
				   uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) >
		sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			  sbi->s_partmaps[bloc.partitionReferenceNum].
							s_partition_len);
		goto error_return;
	}

	block = bloc.logicalBlockNum + offset +
		(sizeof(struct spaceBitmapDesc) << 3);

	do {
		overflow = 0;
		block_group = block >> (sb->s_blocksize_bits + 3);
		bit = block % (sb->s_blocksize << 3);

		/*
		 * Check to see if we are freeing blocks across a group boundary.
		 */
		if (bit + count > (sb->s_blocksize << 3)) {
			overflow = bit + count - (sb->s_blocksize << 3);
			count -= overflow;
		}
		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;

		bh = bitmap->s_block_bitmap[bitmap_nr];
		for (i = 0; i < count; i++) {
			if (udf_set_bit(bit + i, bh->b_data)) {
				udf_debug("bit %ld already set\n", bit + i);
				udf_debug("byte=%2x\n",
					  ((char *)bh->b_data)[(bit + i) >> 3]);
			} else {
				if (inode)
					DQUOT_FREE_BLOCK(inode, 1);
				udf_add_free_space(sbi, sbi->s_partition, 1);
			}
		}
		mark_buffer_dirty(bh);
		if (overflow) {
			block += count;
			count = overflow;
		}
	} while (overflow);

error_return:
	sb->s_dirt = 1;
	if (sbi->s_lvid_bh)
		mark_buffer_dirty(sbi->s_lvid_bh);
	mutex_unlock(&sbi->s_alloc_mutex);
}
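
/*
 * Worked example of the group/bit arithmetic used by the bitmap
 * routines (illustrative numbers, assuming a 2048-byte block size,
 * i.e. s_blocksize_bits == 11): one bitmap block covers
 * 2048 << 3 == 16384 allocation bits, so block 20000 (after adding
 * the sizeof(struct spaceBitmapDesc) << 3 bias that skips the bitmap
 * descriptor header) lands in group 20000 >> (11 + 3) == 1 at bit
 * 20000 % 16384 == 3616 within that group's buffer.
 */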
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
				      struct inode *inode,
				      struct udf_bitmap *bitmap,
				      uint16_t partition, uint32_t first_block,
				      uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;
	__u32 part_len;

	mutex_lock(&sbi->s_alloc_mutex);
	part_len = sbi->s_partmaps[partition].s_partition_len;
	if (first_block < 0 || first_block >= part_len)
		goto out;

	if (first_block + block_count > part_len)
		block_count = part_len - first_block;

	do {
		nr_groups = udf_compute_nr_groups(sb, partition);
		block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
		block_group = block >> (sb->s_blocksize_bits + 3);
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto out;
		bh = bitmap->s_block_bitmap[bitmap_nr];

		bit = block % (sb->s_blocksize << 3);

		while (bit < (sb->s_blocksize << 3) && block_count > 0) {
			if (!udf_test_bit(bit, bh->b_data))
				goto out;
			else if (DQUOT_PREALLOC_BLOCK(inode, 1))
				goto out;
			else if (!udf_clear_bit(bit, bh->b_data)) {
				udf_debug("bit already cleared for block %d\n",
					  bit);
				DQUOT_FREE_BLOCK(inode, 1);
				goto out;
			}
			block_count--;
			alloc_count++;
			bit++;
			block++;
		}
		mark_buffer_dirty(bh);
	} while (block_count > 0);

out:
	if (udf_add_free_space(sbi, partition, -alloc_count))
		mark_buffer_dirty(sbi->s_lvid_bh);
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
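
/*
 * udf_bitmap_new_block() looks for a single free block: it tries the
 * goal block itself, then a set bit inside the goal's 64-bit word,
 * then the remainder of the goal's group, and finally cycles through
 * all groups twice (a byte-granular memscan() pass, then a
 * bit-granular pass). search_back then walks up to 7 bits backwards
 * so the allocation starts at, or near, the front of a run of free
 * blocks (a set bit means "free" in this bitmap).
 */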
static int udf_bitmap_new_block(struct super_block *sb,
				struct inode *inode,
				struct udf_bitmap *bitmap, uint16_t partition,
				uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	mutex_lock(&sbi->s_alloc_mutex);

repeat:
	if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF,
		      sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
		bit = block % (sb->s_blocksize << 3);
		if (udf_test_bit(bit, bh->b_data))
			goto got_block;

		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;

		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
			      sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto search_back;
		}

		newbit = udf_find_next_one_bit(bh->b_data,
					       sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3) {
			bit = newbit;
			goto got_block;
		}
	}

	for (i = 0; i < (nr_groups * 2); i++) {
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups) {
			ptr = memscan((char *)bh->b_data + group_start, 0xFF,
				      sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		} else {
			bit = udf_find_next_one_bit((char *)bh->b_data,
						    sb->s_blocksize << 3,
						    group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
					    group_start << 3);
	if (bit >= sb->s_blocksize << 3) {
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

search_back:
	i = 0;
	while (i < 7 && bit > (group_start << 3) &&
	       udf_test_bit(bit - 1, bh->b_data)) {
		++i;
		--bit;
	}

got_block:

	/*
	 * Check quota for allocation of this block.
	 */
	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data)) {
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	if (udf_add_free_space(sbi, partition, -1))
		mark_buffer_dirty(sbi->s_lvid_bh);
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	mutex_unlock(&sbi->s_alloc_mutex);
	return 0;
}
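
/*
 * udf_table_free_blocks() returns blocks to a free-space table: it
 * first tries to merge [start, end] into an adjacent extent (one
 * ending at start, or one beginning at end + 1), respecting the
 * 30-bit extent length limit (0x3FFFFFFF). Only when no neighbour is
 * found does it append a new extent, stealing a block from the freed
 * range itself whenever a fresh allocation extent descriptor block
 * is required (see the NOTE below).
 */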
static void udf_table_free_blocks(struct super_block *sb,
				  struct inode *inode,
				  struct inode *table,
				  kernel_lb_addr bloc, uint32_t offset,
				  uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t start, end;
	uint32_t elen;
	kernel_lb_addr eloc;
	struct extent_position oepos, epos;
	int8_t etype;
	int i;
	struct udf_inode_info *iinfo;

	mutex_lock(&sbi->s_alloc_mutex);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) >
		sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
		udf_debug("%d < %d || %d + %d > %d\n",
			  bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			  sbi->s_partmaps[bloc.partitionReferenceNum].
							s_partition_len);
		goto error_return;
	}

	iinfo = UDF_I(table);
	/* We do this up front - There are some error conditions that
	   could occur, but... oh well */
	if (inode)
		DQUOT_FREE_BLOCK(inode, count);
	if (udf_add_free_space(sbi, sbi->s_partition, count))
		mark_buffer_dirty(sbi->s_lvid_bh);

	start = bloc.logicalBlockNum + offset;
	end = bloc.logicalBlockNum + offset + count - 1;

	epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	epos.block = oepos.block = iinfo->i_location;
	epos.bh = oepos.bh = NULL;

	while (count &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (((eloc.logicalBlockNum +
		      (elen >> sb->s_blocksize_bits)) == start)) {
			if ((0x3FFFFFFF - elen) <
			    (count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				start += tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				elen = (etype << 30) |
					(elen +
					 (count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		} else if (eloc.logicalBlockNum == (end + 1)) {
			if ((0x3FFFFFFF - elen) <
			    (count << sb->s_blocksize_bits)) {
				uint32_t tmp = ((0x3FFFFFFF - elen) >>
						sb->s_blocksize_bits);
				count -= tmp;
				end -= tmp;
				eloc.logicalBlockNum -= tmp;
				elen = (etype << 30) |
					(0x40000000 - sb->s_blocksize);
			} else {
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen +
					 (count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, &oepos, eloc, elen, 1);
		}

		if (epos.bh != oepos.bh) {
			i = -1;
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = 0;
		} else {
			oepos.offset = epos.offset;
		}
	}

	if (count) {
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we hold the super block
		 * lock already very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */
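
		/*
		 * Illustrative example: freeing eight blocks starting at
		 * block N when a new descriptor block is needed turns
		 * block N itself into the allocation extent descriptor,
		 * so only [N + 1, N + 7] is recorded as free and elen
		 * shrinks by one block size.
		 */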

		int adsize;
		short_ad *sad = NULL;
		long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(short_ad);
		else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(long_ad);
		else {
			brelse(oepos.bh);
			brelse(epos.bh);
			goto error_return;
		}

		if (epos.offset + (2 * adsize) > sb->s_blocksize) {
			char *sptr, *dptr;
			int loffset;

			brelse(oepos.bh);
			oepos = epos;

			/* Steal a block from the extent being freed */
			epos.block.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;

			epos.bh = udf_tread(sb,
					udf_get_lb_pblock(sb, epos.block, 0));
			if (!epos.bh) {
				brelse(oepos.bh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(epos.bh->b_data);
			aed->previousAllocExtLocation =
				cpu_to_le32(oepos.block.logicalBlockNum);
			if (epos.offset + adsize > sb->s_blocksize) {
				loffset = epos.offset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				sptr = iinfo->i_ext.i_data + epos.offset
								- adsize;
				dptr = epos.bh->b_data +
					sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				epos.offset = sizeof(struct allocExtDesc) +
					adsize;
			} else {
				loffset = epos.offset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				if (oepos.bh) {
					sptr = oepos.bh->b_data + epos.offset;
					aed = (struct allocExtDesc *)
						oepos.bh->b_data;
					aed->lengthAllocDescs =
						cpu_to_le32(le32_to_cpu(
						    aed->lengthAllocDescs) +
						    adsize);
				} else {
					sptr = iinfo->i_ext.i_data +
								epos.offset;
					iinfo->i_lenAlloc += adsize;
					mark_inode_dirty(table);
				}
				epos.offset = sizeof(struct allocExtDesc);
			}
			if (sbi->s_udfrev >= 0x0200)
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					    3, 1, epos.block.logicalBlockNum,
					    sizeof(tag));
			else
				udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
					    2, 1, epos.block.logicalBlockNum,
					    sizeof(tag));

			switch (iinfo->i_alloc_type) {
			case ICBTAG_FLAG_AD_SHORT:
				sad = (short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				sad->extPosition =
					cpu_to_le32(epos.block.logicalBlockNum);
				break;
			case ICBTAG_FLAG_AD_LONG:
				lad = (long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					sb->s_blocksize);
				lad->extLocation =
					cpu_to_lelb(epos.block);
				break;
			}
			if (oepos.bh) {
				udf_update_tag(oepos.bh->b_data, loffset);
				mark_buffer_dirty(oepos.bh);
			} else {
				mark_inode_dirty(table);
			}
		}

		/* It's possible that stealing the block emptied the extent */
		if (elen) {
			udf_write_aext(table, &epos, eloc, elen, 1);

			if (!epos.bh) {
				iinfo->i_lenAlloc += adsize;
				mark_inode_dirty(table);
			} else {
				aed = (struct allocExtDesc *)epos.bh->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(
					    aed->lengthAllocDescs) + adsize);
				udf_update_tag(epos.bh->b_data, epos.offset);
				mark_buffer_dirty(epos.bh);
			}
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

error_return:
	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	return;
}
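
/*
 * udf_table_prealloc_blocks() only preallocates when some extent in
 * the free-space table begins exactly at first_block; it then hands
 * the leading blocks of that extent to the caller, shrinking the
 * extent in place or deleting it outright when it is consumed. The
 * return value is the number of blocks actually reserved, which may
 * be less than block_count.
 */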
static int udf_table_prealloc_blocks(struct super_block *sb,
				     struct inode *inode,
				     struct inode *table, uint16_t partition,
				     uint32_t first_block, uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t elen, adsize;
	kernel_lb_addr eloc;
	struct extent_position epos;
	int8_t etype = -1;
	struct udf_inode_info *iinfo;

	if (first_block < 0 ||
	    first_block >= sbi->s_partmaps[partition].s_partition_len)
		return 0;

	iinfo = UDF_I(table);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return 0;

	mutex_lock(&sbi->s_alloc_mutex);
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			  eloc.logicalBlockNum, elen, first_block);
	}

	if (first_block == eloc.logicalBlockNum) {
		epos.offset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (inode && DQUOT_PREALLOC_BLOCK(inode,
			alloc_count > block_count ? block_count : alloc_count))
			alloc_count = 0;
		else if (alloc_count > block_count) {
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, &epos, eloc,
				       (etype << 30) | elen, 1);
		} else
			udf_delete_aext(table, epos, eloc,
					(etype << 30) | elen);
	} else {
		alloc_count = 0;
	}

	brelse(epos.bh);

	if (alloc_count && udf_add_free_space(sbi, partition, -alloc_count)) {
		mark_buffer_dirty(sbi->s_lvid_bh);
		sb->s_dirt = 1;
	}
	mutex_unlock(&sbi->s_alloc_mutex);
	return alloc_count;
}
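
/*
 * udf_table_new_block() scans the whole free-space table for the
 * extent closest to goal (nspread == 0 is an exact hit, which ends
 * the scan early) and allocates the first block of that extent.
 * Taking blocks from the front keeps the update down to rewriting or
 * deleting a single descriptor and never forces an extent split.
 */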
static int udf_table_new_block(struct super_block *sb,
			       struct inode *inode,
			       struct inode *table, uint16_t partition,
			       uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t elen, goal_elen = 0;
	kernel_lb_addr eloc, uninitialized_var(goal_eloc);
	struct extent_position epos, goal_epos;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(table);

	*err = -ENOSPC;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return newblock;

	mutex_lock(&sbi->s_alloc_mutex);
	if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
		goal = 0;

	/* We search for the closest matching block to goal. If we find
	   an exact hit, we stop. Otherwise we keep going till we run out
	   of extents. We store the buffer_head, bloc, and extoffset
	   of the current closest match and use that when we are done.
	 */
	epos.offset = sizeof(struct unallocSpaceEntry);
	epos.block = iinfo->i_location;
	epos.bh = goal_epos.bh = NULL;

	while (spread &&
	       (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
		if (goal >= eloc.logicalBlockNum) {
			if (goal < eloc.logicalBlockNum +
				   (elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		} else {
			nspread = eloc.logicalBlockNum - goal;
		}

		if (nspread < spread) {
			spread = nspread;
			if (goal_epos.bh != epos.bh) {
				brelse(goal_epos.bh);
				goal_epos.bh = epos.bh;
				get_bh(goal_epos.bh);
			}
			goal_epos.block = epos.block;
			goal_epos.offset = epos.offset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	brelse(epos.bh);

	if (spread == 0xFFFFFFFF) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
		brelse(goal_epos.bh);
		mutex_unlock(&sbi->s_alloc_mutex);
		*err = -EDQUOT;
		return 0;
	}

	if (goal_elen)
		udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);
	else
		udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
	brelse(goal_epos.bh);

	if (udf_add_free_space(sbi, partition, -1))
		mark_buffer_dirty(sbi->s_lvid_bh);

	sb->s_dirt = 1;
	mutex_unlock(&sbi->s_alloc_mutex);
	*err = 0;
	return newblock;
}
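
/*
 * The wrappers below dispatch to the bitmap or table implementation
 * according to the partition's space-management flags: UNALLOC_*
 * partitions use map->s_uspace, FREED_* partitions use map->s_fspace.
 */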
inline void udf_free_blocks(struct super_block *sb,
			    struct inode *inode,
			    kernel_lb_addr bloc, uint32_t offset,
			    uint32_t count)
{
	uint16_t partition = bloc.partitionReferenceNum;
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
		return udf_bitmap_free_blocks(sb, inode,
					      map->s_uspace.s_bitmap,
					      bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
		return udf_table_free_blocks(sb, inode,
					     map->s_uspace.s_table,
					     bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
		return udf_bitmap_free_blocks(sb, inode,
					      map->s_fspace.s_bitmap,
					      bloc, offset, count);
	} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
		return udf_table_free_blocks(sb, inode,
					     map->s_fspace.s_table,
					     bloc, offset, count);
	} else {
		return;
	}
}

inline int udf_prealloc_blocks(struct super_block *sb,
			       struct inode *inode,
			       uint16_t partition, uint32_t first_block,
			       uint32_t block_count)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
						  map->s_uspace.s_bitmap,
						  partition, first_block,
						  block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
						 map->s_uspace.s_table,
						 partition, first_block,
						 block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_prealloc_blocks(sb, inode,
						  map->s_fspace.s_bitmap,
						  partition, first_block,
						  block_count);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_prealloc_blocks(sb, inode,
						 map->s_fspace.s_table,
						 partition, first_block,
						 block_count);
	else
		return 0;
}

inline int udf_new_block(struct super_block *sb,
			 struct inode *inode,
			 uint16_t partition, uint32_t goal, int *err)
{
	struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];

	if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					    map->s_uspace.s_bitmap,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
		return udf_table_new_block(sb, inode,
					   map->s_uspace.s_table,
					   partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
		return udf_bitmap_new_block(sb, inode,
					    map->s_fspace.s_bitmap,
					    partition, goal, err);
	else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
		return udf_table_new_block(sb, inode,
					   map->s_fspace.s_table,
					   partition, goal, err);
	else {
		*err = -EIO;
		return 0;
	}
}