/*
 * balloc.c
 *
 * PURPOSE
 *	Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * CONTACTS
 *	E-mail regarding any portion of the Linux UDF file system should be
 *	directed to the development team mailing list (run by majordomo):
 *		linux_udf@hpesjro.fc.hp.com
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *	(C) 1999-2001 Ben Fennema
 *	(C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *	02/24/99 blf	Created.
 *
 */

#include "udfdecl.h"

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define udf_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) find_next_one_bit(addr, size, offset)

#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x,y) xleNUM_to_cpup(x,y)
#define xleNUM_to_cpup(x,y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x

/* Find the next set bit in a little-endian bitmap, starting at @offset. */
extern inline int find_next_one_bit(void *addr, int size, int offset)
{
	uintBPL_t *p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
	int result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG - 1);
	if (offset)
	{
		tmp = leBPL_to_cpup(p++);
		tmp &= ~0UL << offset;
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG - 1))
	{
		if ((tmp = leBPL_to_cpup(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = leBPL_to_cpup(p);
found_first:
	tmp &= ~0UL >> (BITS_PER_LONG - size);
found_middle:
	return result + ffz(~tmp);
}

#define find_first_one_bit(addr, size)\
	find_next_one_bit((addr), (size), 0)

static int read_block_bitmap(struct super_block *sb,
	struct udf_bitmap *bitmap, unsigned int block, unsigned long bitmap_nr)
{
	struct buffer_head *bh = NULL;
	int retval = 0;
	kernel_lb_addr loc;

	loc.logicalBlockNum = bitmap->s_extPosition;
	loc.partitionReferenceNum = UDF_SB_PARTITION(sb);

	bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
	if (!bh)
	{
		retval = -EIO;
	}
	bitmap->s_block_bitmap[bitmap_nr] = bh;
	return retval;
}

static int __load_block_bitmap(struct super_block *sb,
	struct udf_bitmap *bitmap, unsigned int block_group)
{
	int retval = 0;
	int nr_groups = bitmap->s_nr_groups;

	if (block_group >= nr_groups)
	{
		udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group, nr_groups);
	}

	if (bitmap->s_block_bitmap[block_group])
		return block_group;
	else
	{
		retval = read_block_bitmap(sb, bitmap, block_group, block_group);
		if (retval < 0)
			return retval;
		return block_group;
	}
}

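/*
 * Return the cache slot holding the bitmap block for @block_group, loading
 * it from disk via __load_block_bitmap() if necessary; negative errno on
 * failure.
 */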
static inline int load_block_bitmap(struct super_block *sb,
	struct udf_bitmap *bitmap, unsigned int block_group)
{
	int slot;

	slot = __load_block_bitmap(sb, bitmap, block_group);

	if (slot < 0)
		return slot;

	if (!bitmap->s_block_bitmap[slot])
		return -EIO;

	return slot;
}

static void udf_bitmap_free_blocks(struct super_block *sb,
	struct inode *inode,
	struct udf_bitmap *bitmap,
	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct buffer_head *bh = NULL;
	unsigned long block;
	unsigned long block_group;
	unsigned long bit;
	unsigned long i;
	int bitmap_nr;
	unsigned long overflow;

	down(&sbi->s_alloc_sem);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
	{
		udf_debug("%d < %d || %d + %d > %d\n",
			bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	block = bloc.logicalBlockNum + offset + (sizeof(struct spaceBitmapDesc) << 3);

do_more:
	overflow = 0;
	block_group = block >> (sb->s_blocksize_bits + 3);
	bit = block % (sb->s_blocksize << 3);

	/*
	 * Check to see if we are freeing blocks across a group boundary.
	 */
	if (bit + count > (sb->s_blocksize << 3))
	{
		overflow = bit + count - (sb->s_blocksize << 3);
		count -= overflow;
	}
	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;

	bh = bitmap->s_block_bitmap[bitmap_nr];
	for (i = 0; i < count; i++)
	{
		if (udf_set_bit(bit + i, bh->b_data))
		{
			udf_debug("bit %ld already set\n", bit + i);
			udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]);
		}
		else
		{
			if (inode)
				DQUOT_FREE_BLOCK(inode, 1);
			if (UDF_SB_LVIDBH(sb))
			{
				UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
					cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + 1);
			}
		}
	}
	mark_buffer_dirty(bh);
	if (overflow)
	{
		block += count;
		count = overflow;
		goto do_more;
	}
error_return:
	sb->s_dirt = 1;
	if (UDF_SB_LVIDBH(sb))
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	up(&sbi->s_alloc_sem);
	return;
}

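/*
 * Reserve up to @block_count blocks in the space bitmap, starting at
 * @first_block and continuing only while the run of free bits lasts.
 * Returns the number of blocks actually preallocated.
 */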
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
	struct inode *inode,
	struct udf_bitmap *bitmap, uint16_t partition, uint32_t first_block,
	uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	int bit, block, block_group, group_start;
	int nr_groups, bitmap_nr;
	struct buffer_head *bh;

	down(&sbi->s_alloc_sem);
	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		goto out;

	if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
		block_count = UDF_SB_PARTLEN(sb, partition) - first_block;

repeat:
	nr_groups = (UDF_SB_PARTLEN(sb, partition) +
		(sizeof(struct spaceBitmapDesc) << 3) + (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
	block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto out;
	bh = bitmap->s_block_bitmap[bitmap_nr];

	bit = block % (sb->s_blocksize << 3);

	while (bit < (sb->s_blocksize << 3) && block_count > 0)
	{
		if (!udf_test_bit(bit, bh->b_data))
			goto out;
		else if (DQUOT_PREALLOC_BLOCK(inode, 1))
			goto out;
		else if (!udf_clear_bit(bit, bh->b_data))
		{
			udf_debug("bit already cleared for block %d\n", bit);
			DQUOT_FREE_BLOCK(inode, 1);
			goto out;
		}
		block_count--;
		alloc_count++;
		bit++;
		block++;
	}
	mark_buffer_dirty(bh);
	if (block_count > 0)
		goto repeat;
out:
	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	up(&sbi->s_alloc_sem);
	return alloc_count;
}

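/*
 * Allocate a single block from the space bitmap, trying the @goal block
 * first, then the rest of its group, then the remaining groups.  Returns
 * the new block number, or 0 with the reason in *err.
 */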
static int udf_bitmap_new_block(struct super_block *sb,
	struct inode *inode,
	struct udf_bitmap *bitmap, uint16_t partition, uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int newbit, bit = 0, block, block_group, group_start;
	int end_goal, nr_groups, bitmap_nr, i;
	struct buffer_head *bh = NULL;
	char *ptr;
	int newblock = 0;

	*err = -ENOSPC;
	down(&sbi->s_alloc_sem);

repeat:
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	nr_groups = bitmap->s_nr_groups;
	block = goal + (sizeof(struct spaceBitmapDesc) << 3);
	block_group = block >> (sb->s_blocksize_bits + 3);
	group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

	bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
	if (bitmap_nr < 0)
		goto error_return;
	bh = bitmap->s_block_bitmap[bitmap_nr];
	ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start);

	if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize)
	{
		bit = block % (sb->s_blocksize << 3);

		if (udf_test_bit(bit, bh->b_data))
		{
			goto got_block;
		}
		end_goal = (bit + 63) & ~63;
		bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
		if (bit < end_goal)
			goto got_block;
		ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF, sb->s_blocksize - ((bit + 7) >> 3));
		newbit = (ptr - ((char *)bh->b_data)) << 3;
		if (newbit < sb->s_blocksize << 3)
		{
			bit = newbit;
			goto search_back;
		}
		newbit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, bit);
		if (newbit < sb->s_blocksize << 3)
		{
			bit = newbit;
			goto got_block;
		}
	}

	for (i = 0; i < (nr_groups * 2); i++)
	{
		block_group++;
		if (block_group >= nr_groups)
			block_group = 0;
		group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

		bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
		if (bitmap_nr < 0)
			goto error_return;
		bh = bitmap->s_block_bitmap[bitmap_nr];
		if (i < nr_groups)
		{
			ptr = memscan((char *)bh->b_data + group_start, 0xFF, sb->s_blocksize - group_start);
			if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize)
			{
				bit = (ptr - ((char *)bh->b_data)) << 3;
				break;
			}
		}
		else
		{
			bit = udf_find_next_one_bit((char *)bh->b_data, sb->s_blocksize << 3, group_start << 3);
			if (bit < sb->s_blocksize << 3)
				break;
		}
	}
	if (i >= (nr_groups * 2))
	{
		up(&sbi->s_alloc_sem);
		return newblock;
	}
	if (bit < sb->s_blocksize << 3)
		goto search_back;
	else
		bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3, group_start << 3);
	if (bit >= sb->s_blocksize << 3)
	{
		up(&sbi->s_alloc_sem);
		return 0;
	}

search_back:
	for (i = 0; i < 7 && bit > (group_start << 3) && udf_test_bit(bit - 1, bh->b_data); i++, bit--)
		; /* walk back to the start of this free run, at most 7 bits */

got_block:

	/*
	 * Check quota for allocation of this block.
	 */
	if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
	{
		up(&sbi->s_alloc_sem);
		*err = -EDQUOT;
		return 0;
	}

	newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
		(sizeof(struct spaceBitmapDesc) << 3);

	if (!udf_clear_bit(bit, bh->b_data))
	{
		udf_debug("bit already cleared for block %d\n", bit);
		goto repeat;
	}

	mark_buffer_dirty(bh);

	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}
	sb->s_dirt = 1;
	up(&sbi->s_alloc_sem);
	*err = 0;
	return newblock;

error_return:
	*err = -EIO;
	up(&sbi->s_alloc_sem);
	return 0;
}

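/*
 * Return freed blocks to an unallocated-space table: grow a neighbouring
 * extent if the freed range borders one, otherwise record the range in a
 * new extent.
 */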
static void udf_table_free_blocks(struct super_block *sb,
	struct inode *inode,
	struct inode *table,
	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t start, end;
	uint32_t nextoffset, oextoffset, elen;
	kernel_lb_addr nbloc, obloc, eloc;
	struct buffer_head *obh, *nbh;
	int8_t etype;
	int i;

	down(&sbi->s_alloc_sem);
	if (bloc.logicalBlockNum < 0 ||
	    (bloc.logicalBlockNum + count) > UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum))
	{
		udf_debug("%d < %d || %d + %d > %d\n",
			bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
			UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
		goto error_return;
	}

	/* We do this up front - there are some error conditions that
	   could occur, but.. oh well */
	if (inode)
		DQUOT_FREE_BLOCK(inode, count);
	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	start = bloc.logicalBlockNum + offset;
	end = bloc.logicalBlockNum + offset + count - 1;

	oextoffset = nextoffset = sizeof(struct unallocSpaceEntry);
	elen = 0;
	obloc = nbloc = UDF_I_LOCATION(table);

	obh = nbh = NULL;

	while (count && (etype =
		udf_next_aext(table, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
	{
		if ((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) == start)
		{
			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits))
			{
				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				start += ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
			}
			else
			{
				elen = (etype << 30) |
					(elen + (count << sb->s_blocksize_bits));
				start += count;
				count = 0;
			}
			udf_write_aext(table, obloc, &oextoffset, eloc, elen, obh, 1);
		}
		else if (eloc.logicalBlockNum == (end + 1))
		{
			if ((0x3FFFFFFF - elen) < (count << sb->s_blocksize_bits))
			{
				count -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				end -= ((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				eloc.logicalBlockNum -=
					((0x3FFFFFFF - elen) >> sb->s_blocksize_bits);
				elen = (etype << 30) | (0x40000000 - sb->s_blocksize);
			}
			else
			{
				eloc.logicalBlockNum = start;
				elen = (etype << 30) |
					(elen + (count << sb->s_blocksize_bits));
				end -= count;
				count = 0;
			}
			udf_write_aext(table, obloc, &oextoffset, eloc, elen, obh, 1);
		}

		if (nbh != obh)
		{
			i = -1;
			obloc = nbloc;
			udf_release_data(obh);
			atomic_inc(&nbh->b_count);
			obh = nbh;
			oextoffset = 0;
		}
		else
			oextoffset = nextoffset;
	}

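	/*
	 * Whatever is left in count could not be merged into an existing
	 * extent, so append a new extent for it (see the NOTE below on why
	 * udf_add_aext() cannot be used here).
	 */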
	if (count)
	{
		/*
		 * NOTE: we CANNOT use udf_add_aext here, as it can try to
		 * allocate a new block, and since we hold the super block
		 * lock already very bad things would happen :)
		 *
		 * We copy the behavior of udf_add_aext, but instead of
		 * trying to allocate a new block close to the existing one,
		 * we just steal a block from the extent we are trying to add.
		 *
		 * It would be nice if the blocks were close together, but it
		 * isn't required.
		 */

		int adsize;
		short_ad *sad = NULL;
		long_ad *lad = NULL;
		struct allocExtDesc *aed;

		eloc.logicalBlockNum = start;
		elen = EXT_RECORDED_ALLOCATED |
			(count << sb->s_blocksize_bits);

		if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
			adsize = sizeof(short_ad);
		else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
			adsize = sizeof(long_ad);
		else
		{
			udf_release_data(obh);
			udf_release_data(nbh);
			goto error_return;
		}

		if (nextoffset + (2 * adsize) > sb->s_blocksize)
		{
			char *sptr, *dptr;
			int loffset;

			udf_release_data(obh);
			obh = nbh;
			obloc = nbloc;
			oextoffset = nextoffset;

			/* Steal a block from the extent being freed */
			nbloc.logicalBlockNum = eloc.logicalBlockNum;
			eloc.logicalBlockNum++;
			elen -= sb->s_blocksize;

			if (!(nbh = udf_tread(sb,
				udf_get_lb_pblock(sb, nbloc, 0))))
			{
				udf_release_data(obh);
				goto error_return;
			}
			aed = (struct allocExtDesc *)(nbh->b_data);
			aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
			if (nextoffset + adsize > sb->s_blocksize)
			{
				loffset = nextoffset;
				aed->lengthAllocDescs = cpu_to_le32(adsize);
				if (obh)
					sptr = obh->b_data + nextoffset - adsize;
				else
					sptr = UDF_I_DATA(table) + nextoffset -
						udf_file_entry_alloc_offset(table) +
						UDF_I_LENEATTR(table) - adsize;
				dptr = nbh->b_data + sizeof(struct allocExtDesc);
				memcpy(dptr, sptr, adsize);
				nextoffset = sizeof(struct allocExtDesc) + adsize;
			}
			else
			{
				loffset = nextoffset + adsize;
				aed->lengthAllocDescs = cpu_to_le32(0);
				if (obh)
					sptr = obh->b_data + nextoffset;
				else
					sptr = UDF_I_DATA(table) + nextoffset -
						udf_file_entry_alloc_offset(table) +
						UDF_I_LENEATTR(table);
				nextoffset = sizeof(struct allocExtDesc);

				if (obh)
				{
					aed = (struct allocExtDesc *)(obh)->b_data;
					aed->lengthAllocDescs =
						cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
				}
				else
				{
					UDF_I_LENALLOC(table) += adsize;
					mark_inode_dirty(table);
				}
			}
			if (UDF_SB_UDFREV(sb) >= 0x0200)
				udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
					nbloc.logicalBlockNum, sizeof(tag));
			else
				udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
					nbloc.logicalBlockNum, sizeof(tag));
			switch (UDF_I_ALLOCTYPE(table))
			{
				case ICBTAG_FLAG_AD_SHORT:
				{
					sad = (short_ad *)sptr;
					sad->extLength = cpu_to_le32(
						EXT_NEXT_EXTENT_ALLOCDECS |
						sb->s_blocksize);
					sad->extPosition = cpu_to_le32(nbloc.logicalBlockNum);
					break;
				}
				case ICBTAG_FLAG_AD_LONG:
				{
					lad = (long_ad *)sptr;
					lad->extLength = cpu_to_le32(
						EXT_NEXT_EXTENT_ALLOCDECS |
						sb->s_blocksize);
					lad->extLocation = cpu_to_lelb(nbloc);
					break;
				}
			}
			if (obh)
			{
				udf_update_tag(obh->b_data, loffset);
				mark_buffer_dirty(obh);
			}
			else
				mark_inode_dirty(table);
		}

		if (elen) /* It's possible that stealing the block emptied the extent */
		{
			udf_write_aext(table, nbloc, &nextoffset, eloc, elen, nbh, 1);

			if (!nbh)
			{
				UDF_I_LENALLOC(table) += adsize;
				mark_inode_dirty(table);
			}
			else
			{
				aed = (struct allocExtDesc *)nbh->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
				udf_update_tag(nbh->b_data, nextoffset);
				mark_buffer_dirty(nbh);
			}
		}
	}

	udf_release_data(nbh);
	udf_release_data(obh);

error_return:
	sb->s_dirt = 1;
	up(&sbi->s_alloc_sem);
	return;
}

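/*
 * Preallocate from an unallocated-space table: find the extent beginning
 * exactly at @first_block and carve up to @block_count blocks off its
 * front.  Returns the number of blocks taken.
 */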
static int udf_table_prealloc_blocks(struct super_block *sb,
	struct inode *inode,
	struct inode *table, uint16_t partition, uint32_t first_block,
	uint32_t block_count)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	int alloc_count = 0;
	uint32_t extoffset, elen, adsize;
	kernel_lb_addr bloc, eloc;
	struct buffer_head *bh;
	int8_t etype = -1;

	if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
		return 0;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return 0;

	down(&sbi->s_alloc_sem);
	extoffset = sizeof(struct unallocSpaceEntry);
	bloc = UDF_I_LOCATION(table);

	bh = NULL;
	eloc.logicalBlockNum = 0xFFFFFFFF;

	while (first_block != eloc.logicalBlockNum && (etype =
		udf_next_aext(table, &bloc, &extoffset, &eloc, &elen, &bh, 1)) != -1)
	{
		udf_debug("eloc=%d, elen=%d, first_block=%d\n",
			eloc.logicalBlockNum, elen, first_block);
		; /* empty loop body */
	}

	if (first_block == eloc.logicalBlockNum)
	{
		extoffset -= adsize;

		alloc_count = (elen >> sb->s_blocksize_bits);
		if (inode && DQUOT_PREALLOC_BLOCK(inode, alloc_count > block_count ? block_count : alloc_count))
			alloc_count = 0;
		else if (alloc_count > block_count)
		{
			alloc_count = block_count;
			eloc.logicalBlockNum += alloc_count;
			elen -= (alloc_count << sb->s_blocksize_bits);
			udf_write_aext(table, bloc, &extoffset, eloc, (etype << 30) | elen, bh, 1);
		}
		else
			udf_delete_aext(table, bloc, extoffset, eloc, (etype << 30) | elen, bh);
	}
	else
		alloc_count = 0;

	udf_release_data(bh);

	if (alloc_count && UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - alloc_count);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
		sb->s_dirt = 1;
	}
	up(&sbi->s_alloc_sem);
	return alloc_count;
}

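/*
 * Allocate one block from an unallocated-space table.  The extent whose
 * start is nearest to @goal is chosen, and the block is always taken
 * from the front of that extent, so extents only ever shrink or vanish,
 * never split.
 */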
static int udf_table_new_block(struct super_block *sb,
	struct inode *inode,
	struct inode *table, uint16_t partition, uint32_t goal, int *err)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
	uint32_t newblock = 0, adsize;
	uint32_t extoffset, goal_extoffset, elen, goal_elen = 0;
	kernel_lb_addr bloc, goal_bloc, eloc, goal_eloc;
	struct buffer_head *bh, *goal_bh;
	int8_t etype;

	*err = -ENOSPC;

	if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return newblock;

	down(&sbi->s_alloc_sem);
	if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
		goal = 0;

	/*
	 * We search for the closest matching block to goal. If we find an
	 * exact hit, we stop. Otherwise we keep going till we run out of
	 * extents. We store the buffer_head, bloc, and extoffset of the
	 * current closest match and use that when we are done.
	 */

	extoffset = sizeof(struct unallocSpaceEntry);
	bloc = UDF_I_LOCATION(table);

	goal_bh = bh = NULL;

	while (spread && (etype =
		udf_next_aext(table, &bloc, &extoffset, &eloc, &elen, &bh, 1)) != -1)
	{
		if (goal >= eloc.logicalBlockNum)
		{
			if (goal < eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits))
				nspread = 0;
			else
				nspread = goal - eloc.logicalBlockNum -
					(elen >> sb->s_blocksize_bits);
		}
		else
			nspread = eloc.logicalBlockNum - goal;

		if (nspread < spread)
		{
			spread = nspread;
			if (goal_bh != bh)
			{
				udf_release_data(goal_bh);
				goal_bh = bh;
				atomic_inc(&goal_bh->b_count);
			}
			goal_bloc = bloc;
			goal_extoffset = extoffset - adsize;
			goal_eloc = eloc;
			goal_elen = (etype << 30) | elen;
		}
	}

	udf_release_data(bh);

	if (spread == 0xFFFFFFFF)
	{
		udf_release_data(goal_bh);
		up(&sbi->s_alloc_sem);
		return 0;
	}

	/* Only allocate blocks from the beginning of the extent.
	   That way, we only delete (empty) extents, never have to insert an
	   extent because of splitting */
	/* This works, but very poorly.... */

	newblock = goal_eloc.logicalBlockNum;
	goal_eloc.logicalBlockNum++;
	goal_elen -= sb->s_blocksize;

	if (inode && DQUOT_ALLOC_BLOCK(inode, 1))
	{
		udf_release_data(goal_bh);
		up(&sbi->s_alloc_sem);
		*err = -EDQUOT;
		return 0;
	}

	if (goal_elen)
		udf_write_aext(table, goal_bloc, &goal_extoffset, goal_eloc, goal_elen, goal_bh, 1);
	else
		udf_delete_aext(table, goal_bloc, goal_extoffset, goal_eloc, goal_elen, goal_bh);
	udf_release_data(goal_bh);

	if (UDF_SB_LVIDBH(sb))
	{
		UDF_SB_LVID(sb)->freeSpaceTable[partition] =
			cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->freeSpaceTable[partition]) - 1);
		mark_buffer_dirty(UDF_SB_LVIDBH(sb));
	}

	sb->s_dirt = 1;
	up(&sbi->s_alloc_sem);
	*err = 0;
	return newblock;
}

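/*
 * The wrappers below dispatch to the bitmap- or table-based routines
 * above, depending on which free-space structure (unallocated or freed,
 * bitmap or table) the partition map provides.  A typical call looks
 * like this (sketch only; "sb", "inode", "partition" and "goal" are
 * assumed to come from the caller):
 *
 *	int err;
 *	uint32_t blk = udf_new_block(sb, inode, partition, goal, &err);
 *	if (!blk)
 *		return err;
 */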
inline void udf_free_blocks(struct super_block *sb,
	struct inode *inode,
	kernel_lb_addr bloc, uint32_t offset, uint32_t count)
{
	uint16_t partition = bloc.partitionReferenceNum;

	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
	{
		return udf_bitmap_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			bloc, offset, count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
	{
		return udf_table_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			bloc, offset, count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
	{
		return udf_bitmap_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			bloc, offset, count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
	{
		return udf_table_free_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			bloc, offset, count);
	}
	else
		return;
}

inline int udf_prealloc_blocks(struct super_block *sb,
	struct inode *inode,
	uint16_t partition, uint32_t first_block, uint32_t block_count)
{
	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
	{
		return udf_bitmap_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			partition, first_block, block_count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
	{
		return udf_table_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			partition, first_block, block_count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
	{
		return udf_bitmap_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			partition, first_block, block_count);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
	{
		return udf_table_prealloc_blocks(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			partition, first_block, block_count);
	}
	else
		return 0;
}

inline int udf_new_block(struct super_block *sb,
	struct inode *inode,
	uint16_t partition, uint32_t goal, int *err)
{
	if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP)
	{
		return udf_bitmap_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
			partition, goal, err);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE)
	{
		return udf_table_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
			partition, goal, err);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP)
	{
		return udf_bitmap_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
			partition, goal, err);
	}
	else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE)
	{
		return udf_table_new_block(sb, inode,
			UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
			partition, goal, err);
	}
	else
	{
		*err = -EIO;
		return 0;
	}
}