/*
 * inode.c
 *
 * PURPOSE
 *  Inode handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *  This file is distributed under the terms of the GNU General Public
 *  License (GPL). Copies of the GPL can be obtained from:
 *    ftp://prep.ai.mit.edu/pub/gnu/GPL
 *  Each contributing author retains all rights to their own work.
 *
 *  (C) 1998 Dave Boynton
 *  (C) 1998-2004 Ben Fennema
 *  (C) 1999-2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *  10/04/98 dgb  Added rudimentary directory functions
 *  10/07/98      Fully working udf_block_map! It works!
 *  11/25/98      bmap altered to better support extents
 *  12/06/98 blf  partition support in udf_iget, udf_block_map and udf_read_inode
 *  12/12/98      rewrote udf_block_map to handle next extents and descs across
 *                block boundaries (which is not actually allowed)
 *  12/20/98      added support for strategy 4096
 *  03/07/99      rewrote udf_block_map (again)
 *                New funcs, inode_bmap, udf_next_aext
 *  04/19/99      Support for writing device EA's for major/minor #
 */

#include "udfdecl.h"
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/slab.h>

#include "udf_i.h"
#include "udf_sb.h"

MODULE_AUTHOR("Ben Fennema");
MODULE_DESCRIPTION("Universal Disk Format Filesystem");
MODULE_LICENSE("GPL");

#define EXTENT_MERGE_SIZE 5

static mode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
static void udf_fill_inode(struct inode *, struct buffer_head *);
static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
	long *, int *);
static int8_t udf_insert_aext(struct inode *, struct extent_position,
	kernel_lb_addr, uint32_t);
static void udf_split_extents(struct inode *, int *, int, int,
	kernel_long_ad [EXTENT_MERGE_SIZE], int *);
static void udf_prealloc_extents(struct inode *, int, int,
	kernel_long_ad [EXTENT_MERGE_SIZE], int *);
static void udf_merge_extents(struct inode *,
	kernel_long_ad [EXTENT_MERGE_SIZE], int *);
static void udf_update_extents(struct inode *,
	kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
	struct extent_position *);
static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);

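/*
 * Throughout this file an extent length field carries two pieces of
 * information: bits 0-29 hold the extent length in bytes (hence the
 * UDF_EXTENT_LENGTH_MASK masking and the (1<<30) limits below), while
 * bits 30-31 hold the extent type - EXT_RECORDED_ALLOCATED,
 * EXT_NOT_RECORDED_ALLOCATED, EXT_NOT_RECORDED_NOT_ALLOCATED or
 * EXT_NEXT_EXTENT_ALLOCDECS - which is why extent types are compared
 * against (EXT_xxx >> 30) and recombined with (etype << 30).
 */
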
/*
 * udf_delete_inode
 *
 * PURPOSE
 *	Clean-up before the specified inode is destroyed.
 *
 * DESCRIPTION
 *	This routine is called when the kernel destroys an inode structure
 *	i.e. when iput() finds i_count == 0.
 *
 * HISTORY
 *	July 1, 1997 - Andrew E. Mileski
 *	Written, tested, and released.
 *
 *	Called at the last iput() if i_nlink is zero.
 */
void udf_delete_inode(struct inode * inode)
{
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	inode->i_size = 0;
	udf_truncate(inode);
	lock_kernel();

	udf_update_inode(inode, IS_SYNC(inode));
	udf_free_inode(inode);

	unlock_kernel();
	return;
no_delete:
	clear_inode(inode);
}

void udf_clear_inode(struct inode *inode)
{
	if (!(inode->i_sb->s_flags & MS_RDONLY)) {
		lock_kernel();
		udf_discard_prealloc(inode);
		unlock_kernel();
	}

	kfree(UDF_I_DATA(inode));
	UDF_I_DATA(inode) = NULL;
}

static int udf_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, udf_get_block, wbc);
}

static int udf_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, udf_get_block);
}

static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return block_prepare_write(page, from, to, udf_get_block);
}

static sector_t udf_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, udf_get_block);
}

const struct address_space_operations udf_aops = {
	.readpage	= udf_readpage,
	.writepage	= udf_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= udf_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= udf_bmap,
};

void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
{
	struct page *page;
	char *kaddr;
	struct writeback_control udf_wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
	};

	/* from now on we have normal address_space methods */
	inode->i_data.a_ops = &udf_aops;

	if (!UDF_I_LENALLOC(inode))
	{
		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
		else
			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
		mark_inode_dirty(inode);
		return;
	}

	page = grab_cache_page(inode->i_mapping, 0);
	BUG_ON(!PageLocked(page));

	if (!PageUptodate(page))
	{
		kaddr = kmap(page);
		memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
			PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
		memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
			UDF_I_LENALLOC(inode));
		flush_dcache_page(page);
		SetPageUptodate(page);
		kunmap(page);
	}
	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
		UDF_I_LENALLOC(inode));
	UDF_I_LENALLOC(inode) = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
	else
		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;

	inode->i_data.a_ops->writepage(page, &udf_wbc);
	page_cache_release(page);

	mark_inode_dirty(inode);
}

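/*
 * Convert a directory that stores its entries inside the ICB into a normal
 * one-block directory: allocate a data block, copy every file identifier
 * descriptor into it, clear the in-ICB area and record the new block as the
 * directory's single allocation extent.
 */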
struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
{
	int newblock;
	struct buffer_head *dbh = NULL;
	kernel_lb_addr eloc;
	uint32_t elen;
	uint8_t alloctype;
	struct extent_position epos;

	struct udf_fileident_bh sfibh, dfibh;
	loff_t f_pos = udf_ext0_offset(inode) >> 2;
	int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
	struct fileIdentDesc cfi, *sfi, *dfi;

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		alloctype = ICBTAG_FLAG_AD_SHORT;
	else
		alloctype = ICBTAG_FLAG_AD_LONG;

	if (!inode->i_size)
	{
		UDF_I_ALLOCTYPE(inode) = alloctype;
		mark_inode_dirty(inode);
		return NULL;
	}

	/* alloc block, and copy data to it */
	*block = udf_new_block(inode->i_sb, inode,
		UDF_I_LOCATION(inode).partitionReferenceNum,
		UDF_I_LOCATION(inode).logicalBlockNum, err);

	if (!(*block))
		return NULL;
	newblock = udf_get_pblock(inode->i_sb, *block,
		UDF_I_LOCATION(inode).partitionReferenceNum, 0);
	if (!newblock)
		return NULL;
	dbh = udf_tgetblk(inode->i_sb, newblock);
	if (!dbh)
		return NULL;
	lock_buffer(dbh);
	memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
	set_buffer_uptodate(dbh);
	unlock_buffer(dbh);
	mark_buffer_dirty_inode(dbh, inode);

	sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
	sfibh.sbh = sfibh.ebh = NULL;
	dfibh.soffset = dfibh.eoffset = 0;
	dfibh.sbh = dfibh.ebh = dbh;
	while ( (f_pos < size) )
	{
		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
		sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL);
		if (!sfi)
		{
			brelse(dbh);
			return NULL;
		}
		UDF_I_ALLOCTYPE(inode) = alloctype;
		sfi->descTag.tagLocation = cpu_to_le32(*block);
		dfibh.soffset = dfibh.eoffset;
		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
		dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
		if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
			sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
		{
			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
			brelse(dbh);
			return NULL;
		}
	}
	mark_buffer_dirty_inode(dbh, inode);

	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
	UDF_I_LENALLOC(inode) = 0;
	eloc.logicalBlockNum = *block;
	eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
	elen = inode->i_size;
	UDF_I_LENEXTENTS(inode) = elen;
	epos.bh = NULL;
	epos.block = UDF_I_LOCATION(inode);
	epos.offset = udf_file_entry_alloc_offset(inode);
	udf_add_aext(inode, &epos, eloc, elen, 0);
	/* UniqueID stuff */

	brelse(epos.bh);
	mark_inode_dirty(inode);
	return dbh;
}

static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
	int err, new;
	struct buffer_head *bh;
	unsigned long phys;

	if (!create)
	{
		phys = udf_block_map(inode, block);
		if (phys)
			map_bh(bh_result, inode->i_sb, phys);
		return 0;
	}

	err = -EIO;
	new = 0;
	bh = NULL;

	lock_kernel();

	if (block < 0)
		goto abort_negative;

	if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
	{
		UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
		UDF_I_NEXT_ALLOC_GOAL(inode) ++;
	}

	err = 0;

	bh = inode_getblk(inode, block, &err, &phys, &new);
	BUG_ON(bh);
	if (err)
		goto abort;
	BUG_ON(!phys);

	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, inode->i_sb, phys);
abort:
	unlock_kernel();
	return err;

abort_negative:
	udf_warning(inode->i_sb, "udf_get_block", "block < 0");
	goto abort;
}

static struct buffer_head *
udf_getblk(struct inode *inode, long block, int create, int *err)
{
	struct buffer_head dummy;

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	*err = udf_get_block(inode, block, &dummy, create);
	if (!*err && buffer_mapped(&dummy))
	{
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (buffer_new(&dummy))
		{
			lock_buffer(bh);
			memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
			unlock_buffer(bh);
			mark_buffer_dirty_inode(bh, inode);
		}
		return bh;
	}
	return NULL;
}

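/*
 * Note for callers: last_ext must describe the current last extent of the
 * file and last_pos must point at it.  The last extent is rounded up to a
 * block multiple, a trailing preallocation extent is temporarily detached
 * and re-appended at the end, and the gap is filled with not-recorded,
 * not-allocated extents of at most ((1<<30) - blocksize) bytes each.  On
 * return last_pos again points to the last extent that was written.
 */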
/* Extend the file by 'blocks' blocks, return the number of extents added */
int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
	kernel_long_ad *last_ext, sector_t blocks)
{
	sector_t add;
	int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
	struct super_block *sb = inode->i_sb;
	kernel_lb_addr prealloc_loc = {0, 0};
	int prealloc_len = 0;

	/* The previous extent is fake and we should not extend by anything
	 * - there's nothing to do... */
	if (!blocks && fake)
		return 0;
	/* Round the last extent up to a multiple of block size */
	if (last_ext->extLength & (sb->s_blocksize - 1)) {
		last_ext->extLength =
			(last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
			(((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
				sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
		UDF_I_LENEXTENTS(inode) =
			(UDF_I_LENEXTENTS(inode) + sb->s_blocksize - 1) &
			~(sb->s_blocksize - 1);
	}
	/* Is the last extent just preallocated blocks? */
	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED) {
		/* Save the extent so that we can reattach it to the end */
		prealloc_loc = last_ext->extLocation;
		prealloc_len = last_ext->extLength;
		/* Mark the extent as a hole */
		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
		last_ext->extLocation.logicalBlockNum = 0;
		last_ext->extLocation.partitionReferenceNum = 0;
	}
	/* Can we merge with the previous extent? */
	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) {
		add = ((1<<30) - sb->s_blocksize - (last_ext->extLength &
			UDF_EXTENT_LENGTH_MASK)) >> sb->s_blocksize_bits;
		if (add > blocks)
			add = blocks;
		blocks -= add;
		last_ext->extLength += add << sb->s_blocksize_bits;
	}

	if (fake) {
		udf_add_aext(inode, last_pos, last_ext->extLocation,
			last_ext->extLength, 1);
		count++;
	}
	else
		udf_write_aext(inode, last_pos, last_ext->extLocation, last_ext->extLength, 1);
	/* Managed to do everything necessary? */
	if (!blocks)
		goto out;

	/* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
	last_ext->extLocation.logicalBlockNum = 0;
	last_ext->extLocation.partitionReferenceNum = 0;
	add = (1 << (30-sb->s_blocksize_bits)) - 1;
	last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits);
	/* Create enough extents to cover the whole hole */
	while (blocks > add) {
		blocks -= add;
		if (udf_add_aext(inode, last_pos, last_ext->extLocation,
			last_ext->extLength, 1) == -1)
			return -1;
		count++;
	}
	if (blocks) {
		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
			(blocks << sb->s_blocksize_bits);
		if (udf_add_aext(inode, last_pos, last_ext->extLocation,
			last_ext->extLength, 1) == -1)
			return -1;
		count++;
	}
out:
	/* Do we have some preallocated blocks saved? */
	if (prealloc_len) {
		if (udf_add_aext(inode, last_pos, prealloc_loc, prealloc_len, 1) == -1)
			return -1;
		last_ext->extLocation = prealloc_loc;
		last_ext->extLength = prealloc_len;
		count++;
	}
	/* last_pos should point to the last written extent... */
	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		last_pos->offset -= sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		last_pos->offset -= sizeof(long_ad);
	else
		return -1;
	return count;
}

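/*
 * Map 'block' to a physical block, allocating it if necessary.  The extents
 * around the target block are collected into laarr[] (at most
 * EXTENT_MERGE_SIZE entries); startnum counts the extents read from disk and
 * endnum the extents that exist after splitting, preallocation and merging,
 * and udf_update_extents() writes the difference back.  On success *phys is
 * set and NULL is returned; on failure *err is set.
 */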
static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
	int *err, long *phys, int *new)
{
	static sector_t last_block;
	struct buffer_head *result = NULL;
	kernel_long_ad laarr[EXTENT_MERGE_SIZE];
	struct extent_position prev_epos, cur_epos, next_epos;
	int count = 0, startnum = 0, endnum = 0;
	uint32_t elen = 0, tmpelen;
	kernel_lb_addr eloc, tmpeloc;
	int c = 1;
	loff_t lbcount = 0, b_off = 0;
	uint32_t newblocknum, newblock;
	sector_t offset = 0;
	int8_t etype;
	int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
	int lastblock = 0;

	prev_epos.offset = udf_file_entry_alloc_offset(inode);
	prev_epos.block = UDF_I_LOCATION(inode);
	prev_epos.bh = NULL;
	cur_epos = next_epos = prev_epos;
	b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;

	/* find the extent which contains the block we are looking for.
	   alternate between laarr[0] and laarr[1] for locations of the
	   current extent, and the previous extent */
	do
	{
		if (prev_epos.bh != cur_epos.bh)
		{
			brelse(prev_epos.bh);
			get_bh(cur_epos.bh);
			prev_epos.bh = cur_epos.bh;
		}
		if (cur_epos.bh != next_epos.bh)
		{
			brelse(cur_epos.bh);
			get_bh(next_epos.bh);
			cur_epos.bh = next_epos.bh;
		}

		lbcount += elen;

		prev_epos.block = cur_epos.block;
		cur_epos.block = next_epos.block;

		prev_epos.offset = cur_epos.offset;
		cur_epos.offset = next_epos.offset;

		if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1)) == -1)
			break;

		c = !c;

		laarr[c].extLength = (etype << 30) | elen;
		laarr[c].extLocation = eloc;

		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			pgoal = eloc.logicalBlockNum +
				((elen + inode->i_sb->s_blocksize - 1) >>
				inode->i_sb->s_blocksize_bits);

		count ++;
	} while (lbcount + elen <= b_off);

	b_off -= lbcount;
	offset = b_off >> inode->i_sb->s_blocksize_bits;
	/*
	 * Move prev_epos and cur_epos into indirect extent if we are at
	 * the pointer to it
	 */
	udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
	udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);

	/* if the extent is allocated and recorded, return the block
	   if the extent is not a multiple of the blocksize, round up */

	if (etype == (EXT_RECORDED_ALLOCATED >> 30))
	{
		if (elen & (inode->i_sb->s_blocksize - 1))
		{
			elen = EXT_RECORDED_ALLOCATED |
				((elen + inode->i_sb->s_blocksize - 1) &
				~(inode->i_sb->s_blocksize - 1));
			etype = udf_write_aext(inode, &cur_epos, eloc, elen, 1);
		}
		brelse(prev_epos.bh);
		brelse(cur_epos.bh);
		brelse(next_epos.bh);
		newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
		*phys = newblock;
		return NULL;
	}

	last_block = block;
	/* Are we beyond EOF? */
	if (etype == -1)
	{
		int ret;

		if (count) {
			if (c)
				laarr[0] = laarr[1];
			startnum = 1;
		}
		else {
			/* Create a fake extent when there's not one */
			memset(&laarr[0].extLocation, 0x00, sizeof(kernel_lb_addr));
			laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
			/* Will udf_extend_file() create real extent from a fake one? */
			startnum = (offset > 0);
		}
		/* Create extents for the hole between EOF and offset */
		ret = udf_extend_file(inode, &prev_epos, laarr, offset);
		if (ret == -1) {
			brelse(prev_epos.bh);
			brelse(cur_epos.bh);
			brelse(next_epos.bh);
			/* We don't really know the error here so we just make
			 * something up */
			*err = -ENOSPC;
			return NULL;
		}
		c = 0;
		offset = 0;
		count += ret;
		/* We are not covered by a preallocated extent? */
		if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != EXT_NOT_RECORDED_ALLOCATED) {
			/* Is there any real extent? - otherwise we overwrite
			 * the fake one... */
			if (count)
				c = !c;
			laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
				inode->i_sb->s_blocksize;
			memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
			count ++;
			endnum ++;
		}
		endnum = c+1;
		lastblock = 1;
	}
	else {
		endnum = startnum = ((count > 2) ? 2 : count);

		/* if the current extent is in position 0, swap it with the previous */
		if (!c && count != 1)
		{
			laarr[2] = laarr[0];
			laarr[0] = laarr[1];
			laarr[1] = laarr[2];
			c = 1;
		}

		/* if the current block is located in an extent, read the next extent */
		if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1)
		{
			laarr[c+1].extLength = (etype << 30) | elen;
			laarr[c+1].extLocation = eloc;
			count ++;
			startnum ++;
			endnum ++;
		}
		else {
			lastblock = 1;
		}
	}

	/* if the current extent is not recorded but allocated, get the
	   block in the extent corresponding to the requested block */
	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
	else /* otherwise, allocate a new block */
	{
		if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
			goal = UDF_I_NEXT_ALLOC_GOAL(inode);

		if (!goal)
		{
			if (!(goal = pgoal))
				goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
		}

		if (!(newblocknum = udf_new_block(inode->i_sb, inode,
			UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
		{
			brelse(prev_epos.bh);
			*err = -ENOSPC;
			return NULL;
		}
		UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
	}

	/* if the extent the requested block is located in contains multiple blocks,
	   split the extent into at most three extents: blocks prior to the requested
	   block, the requested block, and blocks after the requested block */
	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);

#ifdef UDF_PREALLOCATE
	/* preallocate blocks */
	udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
#endif

	/* merge any continuous blocks in laarr */
	udf_merge_extents(inode, laarr, &endnum);

	/* write back the new extents, inserting new extents if the new number
	   of extents is greater than the old number, and deleting extents if
	   the new number of extents is less than the old number */
	udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);

	brelse(prev_epos.bh);

	if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
		UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
	{
		return NULL;
	}
	*phys = newblock;
	*err = 0;
	*new = 1;
	UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
	UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
	inode->i_ctime = current_fs_time(inode->i_sb);

	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	return result;
}

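/*
 * Split the (not recorded) extent laarr[*c], which covers the requested
 * block at 'offset' blocks into it, into up to three pieces: the blocks
 * before the requested one, a single recorded extent for the new block
 * 'newblocknum', and the blocks after it.  *c and *endnum are updated to
 * keep pointing at the extent of the requested block and at the new total.
 */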
static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
		(laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
	{
		int curr = *c;
		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
		int8_t etype = (laarr[curr].extLength >> 30);

		if (blen == 1)
			;
		else if (!offset || blen == offset + 1)
		{
			laarr[curr+2] = laarr[curr+1];
			laarr[curr+1] = laarr[curr];
		}
		else
		{
			laarr[curr+3] = laarr[curr+1];
			laarr[curr+2] = laarr[curr+1] = laarr[curr];
		}

		if (offset)
		{
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
			{
				udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
				laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
					(offset << inode->i_sb->s_blocksize_bits);
				laarr[curr].extLocation.logicalBlockNum = 0;
				laarr[curr].extLocation.partitionReferenceNum = 0;
			}
			else
				laarr[curr].extLength = (etype << 30) |
					(offset << inode->i_sb->s_blocksize_bits);
			curr ++;
			(*c) ++;
			(*endnum) ++;
		}

		laarr[curr].extLocation.logicalBlockNum = newblocknum;
		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			laarr[curr].extLocation.partitionReferenceNum =
				UDF_I_LOCATION(inode).partitionReferenceNum;
		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
			inode->i_sb->s_blocksize;
		curr ++;

		if (blen != offset + 1)
		{
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
				laarr[curr].extLocation.logicalBlockNum += (offset + 1);
			laarr[curr].extLength = (etype << 30) |
				((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
			curr ++;
			(*endnum) ++;
		}
	}
}

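/*
 * Preallocate up to UDF_DEFAULT_PREALLOC_BLOCKS blocks after the block that
 * has just been allocated, either by growing an allocated-but-unrecorded
 * extent that already follows it or by inserting a new one, and charge the
 * preallocation against the not-recorded, not-allocated extents that follow.
 */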
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	int start, length = 0, currlength = 0, i;

	if (*endnum >= (c+1))
	{
		if (!lastblock)
			return;
		else
			start = c;
	}
	else
	{
		if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		{
			start = c+1;
			length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		}
		else
			start = c;
	}

	for (i=start+1; i<=*endnum; i++)
	{
		if (i == *endnum)
		{
			if (lastblock)
				length += UDF_DEFAULT_PREALLOC_BLOCKS;
		}
		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		else
			break;
	}

	if (length)
	{
		int next = laarr[start].extLocation.logicalBlockNum +
			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
			laarr[start].extLocation.partitionReferenceNum,
			next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
				UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);

		if (numalloc)
		{
			if (start == (c+1))
				laarr[start].extLength +=
					(numalloc << inode->i_sb->s_blocksize_bits);
			else
			{
				memmove(&laarr[c+2], &laarr[c+1],
					sizeof(long_ad) * (*endnum - (c+1)));
				(*endnum) ++;
				laarr[c+1].extLocation.logicalBlockNum = next;
				laarr[c+1].extLocation.partitionReferenceNum =
					laarr[c].extLocation.partitionReferenceNum;
				laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
					(numalloc << inode->i_sb->s_blocksize_bits);
				start = c+1;
			}

			for (i=start+1; numalloc && i<*endnum; i++)
			{
				int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;

				if (elen > numalloc)
				{
					laarr[i].extLength -=
						(numalloc << inode->i_sb->s_blocksize_bits);
					numalloc = 0;
				}
				else
				{
					numalloc -= elen;
					if (*endnum > (i+1))
						memmove(&laarr[i], &laarr[i+1],
							sizeof(long_ad) * (*endnum - (i+1)));
					i --;
					(*endnum) --;
				}
			}
			UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
		}
	}
}

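/*
 * Merge adjacent entries of laarr[]: extents of the same type that are
 * contiguous are combined as long as the result does not overflow the
 * 30-bit length field.  An allocated-but-unrecorded extent that is followed
 * by an extent of a different type has its blocks freed and becomes an
 * unrecorded hole, merged with a following hole where possible.
 */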
static void udf_merge_extents(struct inode *inode,
	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
{
	int i;

	for (i=0; i<(*endnum-1); i++)
	{
		if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
		{
			if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
				((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
				(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
			{
				if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
				{
					laarr[i+1].extLength = (laarr[i+1].extLength -
						(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
						UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
					laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
						(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
					laarr[i+1].extLocation.logicalBlockNum =
						laarr[i].extLocation.logicalBlockNum +
						((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
						inode->i_sb->s_blocksize_bits);
				}
				else
				{
					laarr[i].extLength = laarr[i+1].extLength +
						(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
						inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
					if (*endnum > (i+2))
						memmove(&laarr[i+1], &laarr[i+2],
							sizeof(long_ad) * (*endnum - (i+2)));
					i --;
					(*endnum) --;
				}
			}
		}
		else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
			((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
		{
			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
			laarr[i].extLocation.logicalBlockNum = 0;
			laarr[i].extLocation.partitionReferenceNum = 0;

			if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
			{
				laarr[i+1].extLength = (laarr[i+1].extLength -
					(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
				laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
					(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
			}
			else
			{
				laarr[i].extLength = laarr[i+1].extLength +
					(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
					inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
				if (*endnum > (i+2))
					memmove(&laarr[i+1], &laarr[i+2],
						sizeof(long_ad) * (*endnum - (i+2)));
				i --;
				(*endnum) --;
			}
		}
		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		{
			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
			laarr[i].extLocation.logicalBlockNum = 0;
			laarr[i].extLocation.partitionReferenceNum = 0;
			laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
				EXT_NOT_RECORDED_NOT_ALLOCATED;
		}
	}
}

static void udf_update_extents(struct inode *inode,
	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
	struct extent_position *epos)
{
	int start = 0, i;
	kernel_lb_addr tmploc;
	uint32_t tmplen;

	if (startnum > endnum)
	{
		for (i=0; i<(startnum-endnum); i++)
			udf_delete_aext(inode, *epos, laarr[i].extLocation,
				laarr[i].extLength);
	}
	else if (startnum < endnum)
	{
		for (i=0; i<(endnum-startnum); i++)
		{
			udf_insert_aext(inode, *epos, laarr[i].extLocation,
				laarr[i].extLength);
			udf_next_aext(inode, epos, &laarr[i].extLocation,
				&laarr[i].extLength, 1);
			start ++;
		}
	}

	for (i=start; i<endnum; i++)
	{
		udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
		udf_write_aext(inode, epos, laarr[i].extLocation,
			laarr[i].extLength, 1);
	}
}

struct buffer_head * udf_bread(struct inode * inode, int block,
	int create, int * err)
{
	struct buffer_head * bh = NULL;

	bh = udf_getblk(inode, block, create, err);
	if (!bh)
		return NULL;

	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	*err = -EIO;
	return NULL;
}

void udf_truncate(struct inode * inode)
{
	int offset;
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
			S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	lock_kernel();
	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
	{
		if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
			inode->i_size))
		{
			udf_expand_file_adinicb(inode, inode->i_size, &err);
			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
			{
				inode->i_size = UDF_I_LENALLOC(inode);
				unlock_kernel();
				return;
			}
			else
				udf_truncate_extents(inode);
		}
		else
		{
			offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
			memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00,
				inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
			UDF_I_LENALLOC(inode) = inode->i_size;
		}
	}
	else
	{
		block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
		udf_truncate_extents(inode);
	}

	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	unlock_kernel();
}

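/*
 * Read the file entry (or extended file entry) for the inode from disk and
 * hand it to udf_fill_inode().  For strategy-4096 ICBs the indirect entry is
 * followed to the current file entry first.
 */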
static void
__udf_read_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	uint16_t ident;

	/*
	 * Set defaults, but the inode is still incomplete!
	 * Note: get_new_inode() sets the following on a new inode:
	 *      i_sb = sb
	 *      i_no = ino
	 *      i_flags = sb->s_flags
	 *      i_state = 0
	 * clean_inode(): zero fills and sets
	 *      i_count = 1
	 *      i_nlink = 1
	 *      i_op = NULL;
	 */
	bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);

	if (!bh)
	{
		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
			inode->i_ino);
		make_bad_inode(inode);
		return;
	}

	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
		ident != TAG_IDENT_USE)
	{
		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
			inode->i_ino, ident);
		brelse(bh);
		make_bad_inode(inode);
		return;
	}

	fe = (struct fileEntry *)bh->b_data;

	if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
	{
		struct buffer_head *ibh = NULL, *nbh = NULL;
		struct indirectEntry *ie;

		ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
		if (ident == TAG_IDENT_IE)
		{
			if (ibh)
			{
				kernel_lb_addr loc;
				ie = (struct indirectEntry *)ibh->b_data;

				loc = lelb_to_cpu(ie->indirectICB.extLocation);

				if (ie->indirectICB.extLength &&
					(nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
				{
					if (ident == TAG_IDENT_FE ||
						ident == TAG_IDENT_EFE)
					{
						memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
						brelse(bh);
						brelse(ibh);
						brelse(nbh);
						__udf_read_inode(inode);
						return;
					}
					else
					{
						brelse(nbh);
						brelse(ibh);
					}
				}
				else
					brelse(ibh);
			}
		}
		else
			brelse(ibh);
	}
	else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
	{
		printk(KERN_ERR "udf: unsupported strategy type: %d\n",
			le16_to_cpu(fe->icbTag.strategyType));
		brelse(bh);
		make_bad_inode(inode);
		return;
	}
	udf_fill_inode(inode, bh);

	brelse(bh);
}

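/*
 * Fill the in-core inode from the on-disk file entry, extended file entry or
 * unallocated space entry in bh: copy the allocation descriptor area into
 * UDF_I_DATA, convert ownership, permissions, link count, size and the
 * various timestamps, and set up the inode/file/address-space operations
 * according to the ICB file type.
 */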
static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
{
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	time_t convtime;
	long convtime_usec;
	int offset;

	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (le16_to_cpu(fe->icbTag.strategyType) == 4)
		UDF_I_STRAT4096(inode) = 0;
	else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
		UDF_I_STRAT4096(inode) = 1;

	UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
	UDF_I_UNIQUE(inode) = 0;
	UDF_I_LENEATTR(inode) = 0;
	UDF_I_LENEXTENTS(inode) = 0;
	UDF_I_LENALLOC(inode) = 0;
	UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
	UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
	{
		UDF_I_EFE(inode) = 1;
		UDF_I_USE(inode) = 0;
		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry),
			inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
	}
	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
	{
		UDF_I_EFE(inode) = 0;
		UDF_I_USE(inode) = 0;
		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry),
			inode->i_sb->s_blocksize - sizeof(struct fileEntry));
	}
	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
	{
		UDF_I_EFE(inode) = 0;
		UDF_I_USE(inode) = 1;
		UDF_I_LENALLOC(inode) =
			le32_to_cpu(
				((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry),
			inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
		return;
	}

	inode->i_uid = le32_to_cpu(fe->uid);
	if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
			UDF_FLAG_UID_IGNORE))
		inode->i_uid = UDF_SB(inode->i_sb)->s_uid;

	inode->i_gid = le32_to_cpu(fe->gid);
	if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
			UDF_FLAG_GID_IGNORE))
		inode->i_gid = UDF_SB(inode->i_sb)->s_gid;

	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
	if (!inode->i_nlink)
		inode->i_nlink = 1;

	inode->i_size = le64_to_cpu(fe->informationLength);
	UDF_I_LENEXTENTS(inode) = inode->i_size;

	inode->i_mode = udf_convert_permissions(fe);
	inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;

	if (UDF_I_EFE(inode) == 0)
	{
		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
			(inode->i_sb->s_blocksize_bits - 9);

		if ( udf_stamp_to_time(&convtime, &convtime_usec,
			lets_to_cpu(fe->accessTime)) )
		{
			inode->i_atime.tv_sec = convtime;
			inode->i_atime.tv_nsec = convtime_usec * 1000;
		}
		else
		{
			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		if ( udf_stamp_to_time(&convtime, &convtime_usec,
			lets_to_cpu(fe->modificationTime)) )
		{
			inode->i_mtime.tv_sec = convtime;
			inode->i_mtime.tv_nsec = convtime_usec * 1000;
		}
		else
		{
			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		if ( udf_stamp_to_time(&convtime, &convtime_usec,
			lets_to_cpu(fe->attrTime)) )
		{
			inode->i_ctime.tv_sec = convtime;
			inode->i_ctime.tv_nsec = convtime_usec * 1000;
		}
		else
		{
			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
		UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
		UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
		offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
	}
	else
	{
		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
			(inode->i_sb->s_blocksize_bits - 9);

		if ( udf_stamp_to_time(&convtime, &convtime_usec,
			lets_to_cpu(efe->accessTime)) )
		{
			inode->i_atime.tv_sec = convtime;
			inode->i_atime.tv_nsec = convtime_usec * 1000;
		}
		else
		{
			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		if ( udf_stamp_to_time(&convtime, &convtime_usec,
			lets_to_cpu(efe->modificationTime)) )
		{
			inode->i_mtime.tv_sec = convtime;
			inode->i_mtime.tv_nsec = convtime_usec * 1000;
		}
		else
		{
			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		if ( udf_stamp_to_time(&convtime, &convtime_usec,
			lets_to_cpu(efe->createTime)) )
		{
			UDF_I_CRTIME(inode).tv_sec = convtime;
			UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
		}
		else
		{
			UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
		}

		if ( udf_stamp_to_time(&convtime, &convtime_usec,
			lets_to_cpu(efe->attrTime)) )
		{
			inode->i_ctime.tv_sec = convtime;
			inode->i_ctime.tv_nsec = convtime_usec * 1000;
		}
		else
		{
			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
		}

		UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
		UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
		UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
		offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
	}

	switch (fe->icbTag.fileType)
	{
		case ICBTAG_FILE_TYPE_DIRECTORY:
		{
			inode->i_op = &udf_dir_inode_operations;
			inode->i_fop = &udf_dir_operations;
			inode->i_mode |= S_IFDIR;
			inc_nlink(inode);
			break;
		}
		case ICBTAG_FILE_TYPE_REALTIME:
		case ICBTAG_FILE_TYPE_REGULAR:
		case ICBTAG_FILE_TYPE_UNDEF:
		{
			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
				inode->i_data.a_ops = &udf_adinicb_aops;
			else
				inode->i_data.a_ops = &udf_aops;
			inode->i_op = &udf_file_inode_operations;
			inode->i_fop = &udf_file_operations;
			inode->i_mode |= S_IFREG;
			break;
		}
		case ICBTAG_FILE_TYPE_BLOCK:
		{
			inode->i_mode |= S_IFBLK;
			break;
		}
		case ICBTAG_FILE_TYPE_CHAR:
		{
			inode->i_mode |= S_IFCHR;
			break;
		}
		case ICBTAG_FILE_TYPE_FIFO:
		{
			init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
			break;
		}
		case ICBTAG_FILE_TYPE_SOCKET:
		{
			init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
			break;
		}
		case ICBTAG_FILE_TYPE_SYMLINK:
		{
			inode->i_data.a_ops = &udf_symlink_aops;
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mode = S_IFLNK|S_IRWXUGO;
			break;
		}
		default:
		{
			printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
				inode->i_ino, fe->icbTag.fileType);
			make_bad_inode(inode);
			return;
		}
	}
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
	{
		struct deviceSpec *dsea =
			(struct deviceSpec *)
				udf_get_extendedattr(inode, 12, 1);

		if (dsea)
		{
			init_special_inode(inode, inode->i_mode, MKDEV(
				le32_to_cpu(dsea->majorDeviceIdent),
				le32_to_cpu(dsea->minorDeviceIdent)));
			/* Developer ID ??? */
		}
		else
		{
			make_bad_inode(inode);
		}
	}
}

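/*
 * Convert the UDF permission bits (owner/group/other read, write, execute,
 * change-attribute and delete) plus the ICB setuid/setgid/sticky flags into
 * a POSIX mode; the chattr/delete bits are not representable and are simply
 * dropped here.
 */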
static mode_t
udf_convert_permissions(struct fileEntry *fe)
{
	mode_t mode;
	uint32_t permissions;
	uint32_t flags;

	permissions = le32_to_cpu(fe->permissions);
	flags = le16_to_cpu(fe->icbTag.flags);

	mode =	(( permissions      ) & S_IRWXO) |
		(( permissions >> 2 ) & S_IRWXG) |
		(( permissions >> 4 ) & S_IRWXU) |
		(( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
		(( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
		(( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);

	return mode;
}

/*
 * udf_write_inode
 *
 * PURPOSE
 *	Write out the specified inode.
 *
 * DESCRIPTION
 *	This routine is called whenever an inode is synced.
 *	Currently this routine is just a placeholder.
 *
 * HISTORY
 *	July 1, 1997 - Andrew E. Mileski
 *	Written, tested, and released.
 */

int udf_write_inode(struct inode * inode, int sync)
{
	int ret;
	lock_kernel();
	ret = udf_update_inode(inode, sync);
	unlock_kernel();
	return ret;
}

int udf_sync_inode(struct inode * inode)
{
	return udf_update_inode(inode, 1);
}

static int
udf_update_inode(struct inode *inode, int do_sync)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint32_t udfperms;
	uint16_t icbflags;
	uint16_t crclen;
	int i;
	kernel_timestamp cpu_time;
	int err = 0;

	bh = udf_tread(inode->i_sb,
		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));

	if (!bh)
	{
		udf_debug("bread failure\n");
		return -EIO;
	}

	memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);

	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
	{
		struct unallocSpaceEntry *use =
			(struct unallocSpaceEntry *)bh->b_data;

		use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode),
			inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
		crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
			sizeof(tag);
		use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
		use->descTag.descCRCLength = cpu_to_le16(crclen);
		use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));

		use->descTag.tagChecksum = 0;
		for (i=0; i<16; i++)
			if (i != 4)
				use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];

		mark_buffer_dirty(bh);
		brelse(bh);
		return err;
	}

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
		fe->uid = cpu_to_le32(-1);
	else fe->uid = cpu_to_le32(inode->i_uid);

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
		fe->gid = cpu_to_le32(-1);
	else fe->gid = cpu_to_le32(inode->i_gid);

	udfperms =	((inode->i_mode & S_IRWXO)     ) |
			((inode->i_mode & S_IRWXG) << 2) |
			((inode->i_mode & S_IRWXU) << 4);

	udfperms |=	(le32_to_cpu(fe->permissions) &
			(FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
			 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
			 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
	fe->permissions = cpu_to_le32(udfperms);

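	/*
	 * For directories i_nlink includes the extra reference added by
	 * inc_nlink() in udf_fill_inode(), which is not stored on disk, so
	 * subtract it again here.
	 */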
	if (S_ISDIR(inode->i_mode))
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
	else
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);

	fe->informationLength = cpu_to_le64(inode->i_size);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
	{
		regid *eid;
		struct deviceSpec *dsea =
			(struct deviceSpec *)
				udf_get_extendedattr(inode, 12, 1);

		if (!dsea)
		{
			dsea = (struct deviceSpec *)
				udf_add_extendedattr(inode,
					sizeof(struct deviceSpec) +
					sizeof(regid), 12, 0x3);
			dsea->attrType = cpu_to_le32(12);
			dsea->attrSubtype = 1;
			dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
				sizeof(regid));
			dsea->impUseLength = cpu_to_le32(sizeof(regid));
		}
		eid = (regid *)dsea->impUse;
		memset(eid, 0, sizeof(regid));
		strcpy(eid->ident, UDF_ID_DEVELOPER);
		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
		eid->identSuffix[1] = UDF_OS_ID_LINUX;
		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
	}

	if (UDF_I_EFE(inode) == 0)
	{
		memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode),
			inode->i_sb->s_blocksize - sizeof(struct fileEntry));
		fe->logicalBlocksRecorded = cpu_to_le64(
			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
			(inode->i_sb->s_blocksize_bits - 9));

		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
			fe->accessTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
			fe->modificationTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
			fe->attrTime = cpu_to_lets(cpu_time);
		memset(&(fe->impIdent), 0, sizeof(regid));
		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
		fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
		fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
		crclen = sizeof(struct fileEntry);
	}
	else
	{
		memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode),
			inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
		efe->objectSize = cpu_to_le64(inode->i_size);
		efe->logicalBlocksRecorded = cpu_to_le64(
			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
			(inode->i_sb->s_blocksize_bits - 9));

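		/*
		 * The recorded creation time must never be later than the
		 * access, modification or attribute times, so pull it back
		 * if any of them is older.
		 */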
		if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
			(UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
			 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
		{
			UDF_I_CRTIME(inode) = inode->i_atime;
		}
		if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
			(UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
			 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
		{
			UDF_I_CRTIME(inode) = inode->i_mtime;
		}
		if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
			(UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
			 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
		{
			UDF_I_CRTIME(inode) = inode->i_ctime;
		}

		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
			efe->accessTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
			efe->modificationTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
			efe->createTime = cpu_to_lets(cpu_time);
		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
			efe->attrTime = cpu_to_lets(cpu_time);

		memset(&(efe->impIdent), 0, sizeof(regid));
		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
		efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
		efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
		crclen = sizeof(struct extendedFileEntry);
	}
	if (UDF_I_STRAT4096(inode))
	{
		fe->icbTag.strategyType = cpu_to_le16(4096);
		fe->icbTag.strategyParameter = cpu_to_le16(1);
		fe->icbTag.numEntries = cpu_to_le16(2);
	}
	else
	{
		fe->icbTag.strategyType = cpu_to_le16(4);
		fe->icbTag.numEntries = cpu_to_le16(1);
	}

	if (S_ISDIR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
	else if (S_ISREG(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
	else if (S_ISLNK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
	else if (S_ISBLK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
	else if (S_ISCHR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
	else if (S_ISFIFO(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
	else if (S_ISSOCK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;

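	/*
	 * Regenerate the allocation-type and setuid/setgid/sticky bits from
	 * the in-core inode while preserving any other ICB flags that were
	 * read from disk.
	 */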
	icbflags =	UDF_I_ALLOCTYPE(inode) |
			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
			(le16_to_cpu(fe->icbTag.flags) &
				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));

	fe->icbTag.flags = cpu_to_le16(icbflags);
	if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
		fe->descTag.descVersion = cpu_to_le16(3);
	else
		fe->descTag.descVersion = cpu_to_le16(2);
	fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
	fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
	crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
	fe->descTag.descCRCLength = cpu_to_le16(crclen);
	fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));

	fe->descTag.tagChecksum = 0;
	for (i=0; i<16; i++)
		if (i != 4)
			fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];

	/* write the data blocks */
	mark_buffer_dirty(bh);
	if (do_sync)
	{
		sync_dirty_buffer(bh);
		if (buffer_req(bh) && !buffer_uptodate(bh))
		{
			printk("IO error syncing udf inode [%s:%08lx]\n",
				inode->i_sb->s_id, inode->i_ino);
			err = -EIO;
		}
	}
	brelse(bh);
	return err;
}

struct inode *
udf_iget(struct super_block *sb, kernel_lb_addr ino)
{
	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
	struct inode *inode = iget_locked(sb, block);

	if (!inode)
		return NULL;

	if (inode->i_state & I_NEW) {
		memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
		__udf_read_inode(inode);
		unlock_new_inode(inode);
	}

	if (is_bad_inode(inode))
		goto out_iput;

	if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
		udf_debug("block=%d, partition=%d out of range\n",
			ino.logicalBlockNum, ino.partitionReferenceNum);
		make_bad_inode(inode);
		goto out_iput;
	}

	return inode;

out_iput:
	iput(inode);
	return NULL;
}

int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
	kernel_lb_addr eloc, uint32_t elen, int inc)
{
	int adsize;
	short_ad *sad = NULL;
	long_ad *lad = NULL;
	struct allocExtDesc *aed;
	int8_t etype;
	uint8_t *ptr;

	if (!epos->bh)
		ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
	else
		ptr = epos->bh->b_data + epos->offset;

	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		return -1;

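	/*
	 * If the current descriptor area cannot hold one more descriptor plus
	 * the pointer to a continuation, chain a new allocation extent
	 * descriptor block and continue there.
	 */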
	if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize)
	{
		char *sptr, *dptr;
		struct buffer_head *nbh;
		int err, loffset;
		kernel_lb_addr obloc = epos->block;

		if (!(epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
			obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
		{
			return -1;
		}
		if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
			epos->block, 0))))
		{
			return -1;
		}
		lock_buffer(nbh);
		memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
		set_buffer_uptodate(nbh);
		unlock_buffer(nbh);
		mark_buffer_dirty_inode(nbh, inode);

		aed = (struct allocExtDesc *)(nbh->b_data);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
			aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
		if (epos->offset + adsize > inode->i_sb->s_blocksize)
		{
			loffset = epos->offset;
			aed->lengthAllocDescs = cpu_to_le32(adsize);
			sptr = ptr - adsize;
			dptr = nbh->b_data + sizeof(struct allocExtDesc);
			memcpy(dptr, sptr, adsize);
			epos->offset = sizeof(struct allocExtDesc) + adsize;
		}
		else
		{
			loffset = epos->offset + adsize;
			aed->lengthAllocDescs = cpu_to_le32(0);
			sptr = ptr;
			epos->offset = sizeof(struct allocExtDesc);

			if (epos->bh)
			{
				aed = (struct allocExtDesc *)epos->bh->b_data;
				aed->lengthAllocDescs =
					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
			}
			else
			{
				UDF_I_LENALLOC(inode) += adsize;
				mark_inode_dirty(inode);
			}
		}
		if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
				epos->block.logicalBlockNum, sizeof(tag));
		else
			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
				epos->block.logicalBlockNum, sizeof(tag));
		switch (UDF_I_ALLOCTYPE(inode))
		{
			case ICBTAG_FLAG_AD_SHORT:
			{
				sad = (short_ad *)sptr;
				sad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					inode->i_sb->s_blocksize);
				sad->extPosition = cpu_to_le32(epos->block.logicalBlockNum);
				break;
			}
			case ICBTAG_FLAG_AD_LONG:
			{
				lad = (long_ad *)sptr;
				lad->extLength = cpu_to_le32(
					EXT_NEXT_EXTENT_ALLOCDECS |
					inode->i_sb->s_blocksize);
				lad->extLocation = cpu_to_lelb(epos->block);
				memset(lad->impUse, 0x00, sizeof(lad->impUse));
				break;
			}
		}
		if (epos->bh)
		{
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag(epos->bh->b_data, loffset);
			else
				udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(epos->bh, inode);
			brelse(epos->bh);
		}
		else
			mark_inode_dirty(inode);
		epos->bh = nbh;
	}

	etype = udf_write_aext(inode, epos, eloc, elen, inc);

	if (!epos->bh)
	{
		UDF_I_LENALLOC(inode) += adsize;
		mark_inode_dirty(inode);
	}
	else
	{
		aed = (struct allocExtDesc *)epos->bh->b_data;
		aed->lengthAllocDescs =
			cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
			udf_update_tag(epos->bh->b_data, epos->offset + (inc ? 0 : adsize));
		else
			udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
		mark_buffer_dirty_inode(epos->bh, inode);
	}

	return etype;
}

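/*
 * Write a single allocation descriptor (short_ad or long_ad, depending on
 * the inode's allocation type) at the position described by epos, advance
 * epos past it when inc is set, and return the extent type encoded in the
 * top two bits of elen.
 */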
int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
	kernel_lb_addr eloc, uint32_t elen, int inc)
{
	int adsize;
	uint8_t *ptr;

	if (!epos->bh)
		ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
	else
		ptr = epos->bh->b_data + epos->offset;

	switch (UDF_I_ALLOCTYPE(inode))
	{
		case ICBTAG_FLAG_AD_SHORT:
		{
			short_ad *sad = (short_ad *)ptr;
			sad->extLength = cpu_to_le32(elen);
			sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
			adsize = sizeof(short_ad);
			break;
		}
		case ICBTAG_FLAG_AD_LONG:
		{
			long_ad *lad = (long_ad *)ptr;
			lad->extLength = cpu_to_le32(elen);
			lad->extLocation = cpu_to_lelb(eloc);
			memset(lad->impUse, 0x00, sizeof(lad->impUse));
			adsize = sizeof(long_ad);
			break;
		}
		default:
			return -1;
	}

	if (epos->bh)
	{
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
		{
			struct allocExtDesc *aed = (struct allocExtDesc *)epos->bh->b_data;
			udf_update_tag(epos->bh->b_data,
				le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
		}
		mark_buffer_dirty_inode(epos->bh, inode);
	}
	else
		mark_inode_dirty(inode);

	if (inc)
		epos->offset += adsize;
	return (elen >> 30);
}

int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
	kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int8_t etype;

	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
		(EXT_NEXT_EXTENT_ALLOCDECS >> 30))
	{
		epos->block = *eloc;
		epos->offset = sizeof(struct allocExtDesc);
		brelse(epos->bh);
		if (!(epos->bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, epos->block, 0))))
		{
			udf_debug("reading block %d failed!\n",
				udf_get_lb_pblock(inode->i_sb, epos->block, 0));
			return -1;
		}
	}

	return etype;
}

int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
	kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int alen;
	int8_t etype;
	uint8_t *ptr;

	if (!epos->bh)
	{
		if (!epos->offset)
			epos->offset = udf_file_entry_alloc_offset(inode);
		ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
		alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
	}
	else
	{
		if (!epos->offset)
			epos->offset = sizeof(struct allocExtDesc);
		ptr = epos->bh->b_data + epos->offset;
		alen = sizeof(struct allocExtDesc) +
			le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->lengthAllocDescs);
	}

	switch (UDF_I_ALLOCTYPE(inode))
	{
		case ICBTAG_FLAG_AD_SHORT:
		{
			short_ad *sad;

			if (!(sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc)))
				return -1;

			etype = le32_to_cpu(sad->extLength) >> 30;
			eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
			eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
			*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
			break;
		}
		case ICBTAG_FLAG_AD_LONG:
		{
			long_ad *lad;

			if (!(lad = udf_get_filelongad(ptr, alen, &epos->offset, inc)))
				return -1;

			etype = le32_to_cpu(lad->extLength) >> 30;
			*eloc = lelb_to_cpu(lad->extLocation);
			*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
			break;
		}
		default:
		{
			udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
			return -1;
		}
	}

	return etype;
}

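/*
 * Insert a new extent at the position described by epos by writing the new
 * descriptor there and shifting every following descriptor down one slot,
 * re-adding the last one at the end of the chain.
 */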
static int8_t
udf_insert_aext(struct inode *inode, struct extent_position epos,
	kernel_lb_addr neloc, uint32_t nelen)
{
	kernel_lb_addr oeloc;
	uint32_t oelen;
	int8_t etype;

	if (epos.bh)
		get_bh(epos.bh);	/* extra ref so the final brelse() does not drop the caller's buffer */

	while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1)
	{
		/* write the pending descriptor into this slot, then carry the one it displaced */
		udf_write_aext(inode, &epos, neloc, nelen, 1);

		neloc = oeloc;
		nelen = (etype << 30) | oelen;
	}
	udf_add_aext(inode, &epos, neloc, nelen, 1);
	brelse(epos.bh);
	return (nelen >> 30);
}

int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
	kernel_lb_addr eloc, uint32_t elen)
{
	struct extent_position oepos;
	int adsize;
	int8_t etype;
	struct allocExtDesc *aed;

	if (epos.bh)
	{
		/* two extra refs: both 'epos' and the 'oepos' copy below are brelse()d at the end */
		get_bh(epos.bh);
		get_bh(epos.bh);
	}

	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(short_ad);
	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(long_ad);
	else
		adsize = 0;

	oepos = epos;
	/* step past the descriptor being deleted; oepos keeps pointing at its slot */
	if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
		return -1;

	/* copy every following descriptor back one slot */
	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1)
	{
		udf_write_aext(inode, &oepos, eloc, (etype << 30) | elen, 1);
		if (oepos.bh != epos.bh)
		{
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = epos.offset - adsize;
		}
	}
	/* clear out the slot(s) left over at the end of the chain */
	memset(&eloc, 0x00, sizeof(kernel_lb_addr));
	elen = 0;

	if (epos.bh != oepos.bh)
	{
		udf_free_blocks(inode->i_sb, inode, epos.block, 0, 1);
		udf_write_aext(inode, &oepos, eloc, elen, 1);
		udf_write_aext(inode, &oepos, eloc, elen, 1);
		if (!oepos.bh)
		{
			UDF_I_LENALLOC(inode) -= (adsize * 2);
			mark_inode_dirty(inode);
		}
		else
		{
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			aed->lengthAllocDescs =
				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag(oepos.bh->b_data, oepos.offset - (2*adsize));
			else
				udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	}
	else
	{
		udf_write_aext(inode, &oepos, eloc, elen, 1);
		if (!oepos.bh)
		{
			UDF_I_LENALLOC(inode) -= adsize;
			mark_inode_dirty(inode);
		}
		else
		{
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			aed->lengthAllocDescs =
				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
				udf_update_tag(oepos.bh->b_data, epos.offset - adsize);
			else
				udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);
	return (elen >> 30);
}
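
/*
 * inode_bmap
 *
 * PURPOSE
 *	Find the extent that covers a given file block.
 *
 * DESCRIPTION
 *	Starting from the inode's ICB, the allocation descriptors are walked
 *	with udf_next_aext() until the accumulated extent length exceeds the
 *	requested byte offset (block << s_blocksize_bits).  On success the
 *	extent location and length are returned through 'eloc'/'elen' and
 *	'*offset' is set to the block offset of 'block' within that extent;
 *	on failure (-1) '*offset' holds how many blocks the request lies
 *	beyond the recorded extents.
 *
 *	Worked example, illustrative figures only: with a 2048-byte block
 *	size, block 10 gives bcount = 10 * 2048 = 20480.  If the first
 *	extent is 16384 bytes long and the second 32768 bytes, the walk
 *	stops inside the second extent and
 *
 *		*offset = (20480 + 32768 - 49152) >> 11 = 2
 *
 *	i.e. block 10 is the third block of that extent.
 */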
int8_t inode_bmap(struct inode *inode, sector_t block, struct extent_position *pos,
	kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset)
{
	loff_t lbcount = 0, bcount = (loff_t)block << inode->i_sb->s_blocksize_bits;
	int8_t etype;

	if (block < 0)
	{
		printk(KERN_ERR "udf: inode_bmap: block < 0\n");
		return -1;
	}

	pos->offset = 0;
	pos->block = UDF_I_LOCATION(inode);
	pos->bh = NULL;
	*elen = 0;

	do
	{
		if ((etype = udf_next_aext(inode, pos, eloc, elen, 1)) == -1)
		{
			*offset = (bcount - lbcount) >> inode->i_sb->s_blocksize_bits;
			UDF_I_LENEXTENTS(inode) = lbcount;
			return -1;
		}
		lbcount += *elen;
	} while (lbcount <= bcount);

	*offset = (bcount + *elen - lbcount) >> inode->i_sb->s_blocksize_bits;

	return etype;
}

long udf_block_map(struct inode *inode, sector_t block)
{
	kernel_lb_addr eloc;
	uint32_t elen;
	sector_t offset;
	struct extent_position epos = { NULL, 0, { 0, 0 }};
	int ret;

	lock_kernel();

	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
		ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
	else
		ret = 0;

	unlock_kernel();
	brelse(epos.bh);

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
		return udf_fixed_to_variable(ret);
	else
		return ret;
}
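
/*
 * NOTE
 *	udf_block_map() is the top of the extent-walking chain above: it
 *	calls inode_bmap(), which steps through the short_ad/long_ad
 *	descriptors via udf_next_aext()/udf_current_aext(), following
 *	EXT_NEXT_EXTENT_ALLOCDECS pointers into continuation blocks.
 *
 *	Sketch of the calling pattern for a hypothetical new caller of
 *	inode_bmap() (it simply mirrors udf_block_map() above; 'block' and
 *	'phys' are assumed locals):
 *
 *		struct extent_position epos = { NULL, 0, { 0, 0 }};
 *		kernel_lb_addr eloc;
 *		uint32_t elen;
 *		sector_t offset;
 *		long phys = 0;
 *
 *		lock_kernel();
 *		if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
 *				(EXT_RECORDED_ALLOCATED >> 30))
 *			phys = udf_get_lb_pblock(inode->i_sb, eloc, offset);
 *		unlock_kernel();
 *		brelse(epos.bh);
 *
 *	The buffer head left in epos.bh must always be released, and the
 *	walk is done under the BKL, as udf_block_map() does above.
 */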