/*
 * inode.c
 *
 * PURPOSE
 *  Inode handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *  This file is distributed under the terms of the GNU General Public
 *  License (GPL). Copies of the GPL can be obtained from:
 *    ftp://prep.ai.mit.edu/pub/gnu/GPL
 *  Each contributing author retains all rights to their own work.
 *
 *  (C) 1998 Dave Boynton
 *  (C) 1998-2004 Ben Fennema
 *  (C) 1999-2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *  10/04/98 dgb  Added rudimentary directory functions
 *  10/07/98      Fully working udf_block_map! It works!
 *  11/25/98      bmap altered to better support extents
 *  12/06/98 blf  partition support in udf_iget, udf_block_map
 *                and udf_read_inode
 *  12/12/98      rewrote udf_block_map to handle next extents and descs across
 *                block boundaries (which is not actually allowed)
 *  12/20/98      added support for strategy 4096
 *  03/07/99      rewrote udf_block_map (again)
 *                New funcs, inode_bmap, udf_next_aext
 *  04/19/99      Support for writing device EA's for major/minor #
 */

#include "udfdecl.h"
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/crc-itu-t.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/bio.h>

#include "udf_i.h"
#include "udf_sb.h"

#define EXTENT_MERGE_SIZE 5

#define FE_MAPPED_PERMS	(FE_PERM_U_READ | FE_PERM_U_WRITE | FE_PERM_U_EXEC | \
			 FE_PERM_G_READ | FE_PERM_G_WRITE | FE_PERM_G_EXEC | \
			 FE_PERM_O_READ | FE_PERM_O_WRITE | FE_PERM_O_EXEC)

#define FE_DELETE_PERMS	(FE_PERM_U_DELETE | FE_PERM_G_DELETE | \
			 FE_PERM_O_DELETE)

static umode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
static int udf_sync_inode(struct inode *inode);
static int udf_alloc_i_data(struct inode *inode, size_t size);
static sector_t inode_getblk(struct inode *, sector_t, int *, int *);
static int8_t udf_insert_aext(struct inode *, struct extent_position,
			      struct kernel_lb_addr, uint32_t);
static void udf_split_extents(struct inode *, int *, int, udf_pblk_t,
			      struct kernel_long_ad *, int *);
static void udf_prealloc_extents(struct inode *, int, int,
				 struct kernel_long_ad *, int *);
static void udf_merge_extents(struct inode *, struct kernel_long_ad *, int *);
static void udf_update_extents(struct inode *, struct kernel_long_ad *, int,
			       int, struct extent_position *);
static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);

static void __udf_clear_extent_cache(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (iinfo->cached_extent.lstart != -1) {
		brelse(iinfo->cached_extent.epos.bh);
		iinfo->cached_extent.lstart = -1;
	}
}

/* Invalidate extent cache */
static void udf_clear_extent_cache(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	spin_lock(&iinfo->i_extent_cache_lock);
	__udf_clear_extent_cache(inode);
	spin_unlock(&iinfo->i_extent_cache_lock);
}

/* Return contents of extent cache */
static int udf_read_extent_cache(struct inode *inode, loff_t bcount,
				 loff_t *lbcount, struct extent_position *pos)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	int ret = 0;

	spin_lock(&iinfo->i_extent_cache_lock);
	if ((iinfo->cached_extent.lstart <= bcount) &&
	    (iinfo->cached_extent.lstart != -1)) {
		/* Cache hit */
		*lbcount = iinfo->cached_extent.lstart;
		memcpy(pos, &iinfo->cached_extent.epos,
		       sizeof(struct extent_position));
		if (pos->bh)
			get_bh(pos->bh);
		ret = 1;
	}
	spin_unlock(&iinfo->i_extent_cache_lock);
	return ret;
}

/* Add extent to extent cache */
static void udf_update_extent_cache(struct inode *inode, loff_t estart,
				    struct extent_position *pos)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	spin_lock(&iinfo->i_extent_cache_lock);
	/* Invalidate previously cached extent */
	__udf_clear_extent_cache(inode);
	if (pos->bh)
		get_bh(pos->bh);
	memcpy(&iinfo->cached_extent.epos, pos, sizeof(*pos));
	iinfo->cached_extent.lstart = estart;
	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		iinfo->cached_extent.epos.offset -= sizeof(struct short_ad);
		break;
	case ICBTAG_FLAG_AD_LONG:
		iinfo->cached_extent.epos.offset -= sizeof(struct long_ad);
		break;
	}
	spin_unlock(&iinfo->i_extent_cache_lock);
}

void udf_evict_inode(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	int want_delete = 0;

	if (!inode->i_nlink && !is_bad_inode(inode)) {
		want_delete = 1;
		udf_setsize(inode, 0);
		udf_update_inode(inode, IS_SYNC(inode));
	}
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
	    inode->i_size != iinfo->i_lenExtents) {
		udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
			 inode->i_ino, inode->i_mode,
			 (unsigned long long)inode->i_size,
			 (unsigned long long)iinfo->i_lenExtents);
	}
	kfree(iinfo->i_ext.i_data);
	iinfo->i_ext.i_data = NULL;
	udf_clear_extent_cache(inode);
	if (want_delete) {
		udf_free_inode(inode);
	}
}

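/*
 * Called when a buffered or direct write has failed: drop the page cache
 * beyond the old i_size and, for files not stored in the ICB, truncate any
 * extents that were instantiated for the failed write so no allocated but
 * unwritten blocks are left behind.
 */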
static void udf_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	struct udf_inode_info *iinfo = UDF_I(inode);
	loff_t isize = inode->i_size;

	if (to > isize) {
		truncate_pagecache(inode, isize);
		if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
			down_write(&iinfo->i_data_sem);
			udf_clear_extent_cache(inode);
			udf_truncate_extents(inode);
			up_write(&iinfo->i_data_sem);
		}
	}
}

static int udf_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, udf_get_block, wbc);
}

static int udf_writepages(struct address_space *mapping,
			  struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, udf_get_block);
}

static int udf_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, udf_get_block);
}

static void udf_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, udf_get_block);
}

static int udf_write_begin(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned flags,
			   struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block);
	if (unlikely(ret))
		udf_write_failed(mapping, pos + len);
	return ret;
}

static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, udf_get_block);
	if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
		udf_write_failed(mapping, iocb->ki_pos + count);
	return ret;
}

static sector_t udf_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, udf_get_block);
}

const struct address_space_operations udf_aops = {
	.readpage	= udf_readpage,
	.readahead	= udf_readahead,
	.writepage	= udf_writepage,
	.writepages	= udf_writepages,
	.write_begin	= udf_write_begin,
	.write_end	= generic_write_end,
	.direct_IO	= udf_direct_IO,
	.bmap		= udf_bmap,
};

/*
 * Expand file stored in ICB to a normal one-block-file
 *
 * This function requires i_data_sem for writing and releases it.
 * This function requires i_mutex held
 */
int udf_expand_file_adinicb(struct inode *inode)
{
	struct page *page;
	char *kaddr;
	struct udf_inode_info *iinfo = UDF_I(inode);
	int err;
	struct writeback_control udf_wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
	};

	WARN_ON_ONCE(!inode_is_locked(inode));
	if (!iinfo->i_lenAlloc) {
		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
		else
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
		/* from now on we have normal address_space methods */
		inode->i_data.a_ops = &udf_aops;
		up_write(&iinfo->i_data_sem);
		mark_inode_dirty(inode);
		return 0;
	}
	/*
	 * Release i_data_sem so that we can lock a page - page lock ranks
	 * above i_data_sem. i_mutex still protects us against file changes.
	 */
	up_write(&iinfo->i_data_sem);

	page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	if (!PageUptodate(page)) {
		kaddr = kmap_atomic(page);
		memset(kaddr + iinfo->i_lenAlloc, 0x00,
		       PAGE_SIZE - iinfo->i_lenAlloc);
		memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr,
		       iinfo->i_lenAlloc);
		flush_dcache_page(page);
		SetPageUptodate(page);
		kunmap_atomic(kaddr);
	}
	down_write(&iinfo->i_data_sem);
	memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00,
	       iinfo->i_lenAlloc);
	iinfo->i_lenAlloc = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
	else
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
	/* from now on we have normal address_space methods */
	inode->i_data.a_ops = &udf_aops;
	up_write(&iinfo->i_data_sem);
	err = inode->i_data.a_ops->writepage(page, &udf_wbc);
	if (err) {
		/* Restore everything back so that we don't lose data... */
		lock_page(page);
		down_write(&iinfo->i_data_sem);
		kaddr = kmap_atomic(page);
		memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr,
		       inode->i_size);
		kunmap_atomic(kaddr);
		unlock_page(page);
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
		inode->i_data.a_ops = &udf_adinicb_aops;
		up_write(&iinfo->i_data_sem);
	}
	put_page(page);
	mark_inode_dirty(inode);

	return err;
}

struct buffer_head *udf_expand_dir_adinicb(struct inode *inode,
					    udf_pblk_t *block, int *err)
{
	udf_pblk_t newblock;
	struct buffer_head *dbh = NULL;
	struct kernel_lb_addr eloc;
	uint8_t alloctype;
	struct extent_position epos;

	struct udf_fileident_bh sfibh, dfibh;
	loff_t f_pos = udf_ext0_offset(inode);
	int size = udf_ext0_offset(inode) + inode->i_size;
	struct fileIdentDesc cfi, *sfi, *dfi;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		alloctype = ICBTAG_FLAG_AD_SHORT;
	else
		alloctype = ICBTAG_FLAG_AD_LONG;

	if (!inode->i_size) {
		iinfo->i_alloc_type = alloctype;
		mark_inode_dirty(inode);
		return NULL;
	}

	/* alloc block, and copy data to it */
	*block = udf_new_block(inode->i_sb, inode,
			       iinfo->i_location.partitionReferenceNum,
			       iinfo->i_location.logicalBlockNum, err);
	if (!(*block))
		return NULL;
	newblock = udf_get_pblock(inode->i_sb, *block,
				  iinfo->i_location.partitionReferenceNum,
				  0);
	if (!newblock)
		return NULL;
	dbh = udf_tgetblk(inode->i_sb, newblock);
	if (!dbh)
		return NULL;
	lock_buffer(dbh);
	memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
	set_buffer_uptodate(dbh);
	unlock_buffer(dbh);
	mark_buffer_dirty_inode(dbh, inode);

	sfibh.soffset = sfibh.eoffset =
			f_pos & (inode->i_sb->s_blocksize - 1);
	sfibh.sbh = sfibh.ebh = NULL;
	dfibh.soffset = dfibh.eoffset = 0;
	dfibh.sbh = dfibh.ebh = dbh;
	while (f_pos < size) {
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
		sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL,
					 NULL, NULL, NULL);
		if (!sfi) {
			brelse(dbh);
			return NULL;
		}
		iinfo->i_alloc_type = alloctype;
		sfi->descTag.tagLocation = cpu_to_le32(*block);
		dfibh.soffset = dfibh.eoffset;
		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
		dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
		if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
				 sfi->fileIdent +
					le16_to_cpu(sfi->lengthOfImpUse))) {
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
			brelse(dbh);
			return NULL;
		}
	}
	mark_buffer_dirty_inode(dbh, inode);

	memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0,
	       iinfo->i_lenAlloc);
	iinfo->i_lenAlloc = 0;
	eloc.logicalBlockNum = *block;
	eloc.partitionReferenceNum =
				iinfo->i_location.partitionReferenceNum;
	iinfo->i_lenExtents = inode->i_size;
	epos.bh = NULL;
	epos.block = iinfo->i_location;
	epos.offset = udf_file_entry_alloc_offset(inode);
	udf_add_aext(inode, &epos, &eloc, inode->i_size, 0);
	/* UniqueID stuff */

	brelse(epos.bh);
	mark_inode_dirty(inode);
	return dbh;
}

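/*
 * Standard get_block_t callback: map logical file block @block to a physical
 * block in @bh_result.  With create == 0 this is a pure lookup; with
 * create != 0 any missing block is allocated via inode_getblk() under
 * i_data_sem.
 */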
static int udf_get_block(struct inode *inode, sector_t block,
			 struct buffer_head *bh_result, int create)
{
	int err, new;
	sector_t phys = 0;
	struct udf_inode_info *iinfo;

	if (!create) {
		phys = udf_block_map(inode, block);
		if (phys)
			map_bh(bh_result, inode->i_sb, phys);
		return 0;
	}

	err = -EIO;
	new = 0;
	iinfo = UDF_I(inode);

	down_write(&iinfo->i_data_sem);
	if (block == iinfo->i_next_alloc_block + 1) {
		iinfo->i_next_alloc_block++;
		iinfo->i_next_alloc_goal++;
	}

	udf_clear_extent_cache(inode);
	phys = inode_getblk(inode, block, &err, &new);
	if (!phys)
		goto abort;

	if (new)
		set_buffer_new(bh_result);
	map_bh(bh_result, inode->i_sb, phys);

abort:
	up_write(&iinfo->i_data_sem);
	return err;
}

static struct buffer_head *udf_getblk(struct inode *inode, udf_pblk_t block,
				      int create, int *err)
{
	struct buffer_head *bh;
	struct buffer_head dummy;

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	*err = udf_get_block(inode, block, &dummy, create);
	if (!*err && buffer_mapped(&dummy)) {
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (buffer_new(&dummy)) {
			lock_buffer(bh);
			memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
			unlock_buffer(bh);
			mark_buffer_dirty_inode(bh, inode);
		}
		return bh;
	}

	return NULL;
}

/* Extend the file with new blocks totaling 'new_block_bytes',
 * return the number of extents added
 */
static int udf_do_extend_file(struct inode *inode,
			      struct extent_position *last_pos,
			      struct kernel_long_ad *last_ext,
			      loff_t new_block_bytes)
{
	uint32_t add;
	int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
	struct super_block *sb = inode->i_sb;
	struct kernel_lb_addr prealloc_loc = {};
	uint32_t prealloc_len = 0;
	struct udf_inode_info *iinfo;
	int err;

	/* The previous extent is fake and we should not extend by anything
	 * - there's nothing to do... */
	if (!new_block_bytes && fake)
		return 0;

	iinfo = UDF_I(inode);
	/* Round the last extent up to a multiple of block size */
	if (last_ext->extLength & (sb->s_blocksize - 1)) {
		last_ext->extLength =
			(last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
			(((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
			  sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
		iinfo->i_lenExtents =
			(iinfo->i_lenExtents + sb->s_blocksize - 1) &
			~(sb->s_blocksize - 1);
	}

	/* Last extent are just preallocated blocks? */
	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
	    EXT_NOT_RECORDED_ALLOCATED) {
		/* Save the extent so that we can reattach it to the end */
		prealloc_loc = last_ext->extLocation;
		prealloc_len = last_ext->extLength;
		/* Mark the extent as a hole */
		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
		last_ext->extLocation.logicalBlockNum = 0;
		last_ext->extLocation.partitionReferenceNum = 0;
	}

	/* Can we merge with the previous extent? */
	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
	    EXT_NOT_RECORDED_NOT_ALLOCATED) {
		add = (1 << 30) - sb->s_blocksize -
			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
		if (add > new_block_bytes)
			add = new_block_bytes;
		new_block_bytes -= add;
		last_ext->extLength += add;
	}

	if (fake) {
		udf_add_aext(inode, last_pos, &last_ext->extLocation,
			     last_ext->extLength, 1);
		count++;
	} else {
		struct kernel_lb_addr tmploc;
		uint32_t tmplen;

		udf_write_aext(inode, last_pos, &last_ext->extLocation,
			       last_ext->extLength, 1);
		/*
		 * We've rewritten the last extent but there may be empty
		 * indirect extent after it - enter it.
		 */
		udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
	}

	/* Managed to do everything necessary? */
	if (!new_block_bytes)
		goto out;

	/* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
	last_ext->extLocation.logicalBlockNum = 0;
	last_ext->extLocation.partitionReferenceNum = 0;
	add = (1 << 30) - sb->s_blocksize;
	last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | add;

	/* Create enough extents to cover the whole hole */
	while (new_block_bytes > add) {
		new_block_bytes -= add;
		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
				   last_ext->extLength, 1);
		if (err)
			return err;
		count++;
	}
	if (new_block_bytes) {
		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
			new_block_bytes;
		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
				   last_ext->extLength, 1);
		if (err)
			return err;
		count++;
	}

out:
	/* Do we have some preallocated blocks saved? */
	if (prealloc_len) {
		err = udf_add_aext(inode, last_pos, &prealloc_loc,
				   prealloc_len, 1);
		if (err)
			return err;
		last_ext->extLocation = prealloc_loc;
		last_ext->extLength = prealloc_len;
		count++;
	}

	/* last_pos should point to the last written extent... */
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		last_pos->offset -= sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		last_pos->offset -= sizeof(struct long_ad);
	else
		return -EIO;

	return count;
}

/* Extend the final block of the file to final_block_len bytes */
static void udf_do_extend_final_block(struct inode *inode,
				      struct extent_position *last_pos,
				      struct kernel_long_ad *last_ext,
				      uint32_t final_block_len)
{
	struct super_block *sb = inode->i_sb;
	uint32_t added_bytes;

	added_bytes = final_block_len -
		      (last_ext->extLength & (sb->s_blocksize - 1));
	last_ext->extLength += added_bytes;
	UDF_I(inode)->i_lenExtents += added_bytes;

	udf_write_aext(inode, last_pos, &last_ext->extLocation,
		       last_ext->extLength, 1);
}

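/*
 * Grow the file to @newsize by extending the last extent and/or appending
 * not-recorded (hole) extents after it.  Note that in the in-core
 * kernel_long_ad the extent type lives in the top two bits of extLength and
 * the byte length in the low 30 bits, e.g. EXT_NOT_RECORDED_NOT_ALLOCATED |
 * len.  Caller must hold i_data_sem for writing.
 */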
static int udf_extend_file(struct inode *inode, loff_t newsize)
{
	struct extent_position epos;
	struct kernel_lb_addr eloc;
	uint32_t elen;
	int8_t etype;
	struct super_block *sb = inode->i_sb;
	sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
	unsigned long partial_final_block;
	int adsize;
	struct udf_inode_info *iinfo = UDF_I(inode);
	struct kernel_long_ad extent;
	int err = 0;
	int within_final_block;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		BUG();

	etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
	within_final_block = (etype != -1);

	if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
	    (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
		/* File has no extents at all or has empty last
		 * indirect extent! Create a fake extent... */
		extent.extLocation.logicalBlockNum = 0;
		extent.extLocation.partitionReferenceNum = 0;
		extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
	} else {
		epos.offset -= adsize;
		etype = udf_next_aext(inode, &epos, &extent.extLocation,
				      &extent.extLength, 0);
		extent.extLength |= etype << 30;
	}

	partial_final_block = newsize & (sb->s_blocksize - 1);

	/* File has extent covering the new size (could happen when extending
	 * inside a block)?
	 */
	if (within_final_block) {
		/* Extending file within the last file block */
		udf_do_extend_final_block(inode, &epos, &extent,
					  partial_final_block);
	} else {
		loff_t add = ((loff_t)offset << sb->s_blocksize_bits) |
			     partial_final_block;
		err = udf_do_extend_file(inode, &epos, &extent, add);
	}

	if (err < 0)
		goto out;
	err = 0;
	iinfo->i_lenExtents = newsize;
out:
	brelse(epos.bh);
	return err;
}

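/*
 * Look up (and, if necessary, allocate) the on-disc block backing logical
 * block @block.  Walks the extent list, extends the file over any hole up to
 * @block, allocates a new block when needed, then splits, preallocates and
 * merges the affected extents before writing them back.  Returns the
 * physical block number, or 0 with *err set on failure.  Called with
 * i_data_sem held for writing.
 */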
static sector_t inode_getblk(struct inode *inode, sector_t block,
			     int *err, int *new)
{
	struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
	struct extent_position prev_epos, cur_epos, next_epos;
	int count = 0, startnum = 0, endnum = 0;
	uint32_t elen = 0, tmpelen;
	struct kernel_lb_addr eloc, tmpeloc;
	int c = 1;
	loff_t lbcount = 0, b_off = 0;
	udf_pblk_t newblocknum, newblock;
	sector_t offset = 0;
	int8_t etype;
	struct udf_inode_info *iinfo = UDF_I(inode);
	udf_pblk_t goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
	int lastblock = 0;
	bool isBeyondEOF;

	*err = 0;
	*new = 0;
	prev_epos.offset = udf_file_entry_alloc_offset(inode);
	prev_epos.block = iinfo->i_location;
	prev_epos.bh = NULL;
	cur_epos = next_epos = prev_epos;
	b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;

	/* find the extent which contains the block we are looking for.
	   alternate between laarr[0] and laarr[1] for locations of the
	   current extent, and the previous extent */
	do {
		if (prev_epos.bh != cur_epos.bh) {
			brelse(prev_epos.bh);
			get_bh(cur_epos.bh);
			prev_epos.bh = cur_epos.bh;
		}
		if (cur_epos.bh != next_epos.bh) {
			brelse(cur_epos.bh);
			get_bh(next_epos.bh);
			cur_epos.bh = next_epos.bh;
		}

		lbcount += elen;

		prev_epos.block = cur_epos.block;
		cur_epos.block = next_epos.block;

		prev_epos.offset = cur_epos.offset;
		cur_epos.offset = next_epos.offset;

		etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
		if (etype == -1)
			break;

		c = !c;

		laarr[c].extLength = (etype << 30) | elen;
		laarr[c].extLocation = eloc;

		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			pgoal = eloc.logicalBlockNum +
				((elen + inode->i_sb->s_blocksize - 1) >>
				 inode->i_sb->s_blocksize_bits);

		count++;
	} while (lbcount + elen <= b_off);

	b_off -= lbcount;
	offset = b_off >> inode->i_sb->s_blocksize_bits;
	/*
	 * Move prev_epos and cur_epos into indirect extent if we are at
	 * the pointer to it
	 */
	udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
	udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);

	/* if the extent is allocated and recorded, return the block
	   if the extent is not a multiple of the blocksize, round up */

	if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
		if (elen & (inode->i_sb->s_blocksize - 1)) {
			elen = EXT_RECORDED_ALLOCATED |
				((elen + inode->i_sb->s_blocksize - 1) &
				 ~(inode->i_sb->s_blocksize - 1));
			udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
		}
		newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
		goto out_free;
	}

	/* Are we beyond EOF? */
	if (etype == -1) {
		int ret;
		loff_t hole_len;

		isBeyondEOF = true;
		if (count) {
			if (c)
				laarr[0] = laarr[1];
			startnum = 1;
		} else {
			/* Create a fake extent when there's not one */
			memset(&laarr[0].extLocation, 0x00,
			       sizeof(struct kernel_lb_addr));
			laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
			/* Will udf_do_extend_file() create real extent from
			   a fake one? */
			startnum = (offset > 0);
		}
		/* Create extents for the hole between EOF and offset */
		hole_len = (loff_t)offset << inode->i_blkbits;
		ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
		if (ret < 0) {
			*err = ret;
			newblock = 0;
			goto out_free;
		}
		c = 0;
		offset = 0;
		count += ret;
		/* We are not covered by a preallocated extent? */
		if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) !=
		    EXT_NOT_RECORDED_ALLOCATED) {
			/* Is there any real extent? - otherwise we overwrite
			 * the fake one... */
			if (count)
				c = !c;
			laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
				inode->i_sb->s_blocksize;
			memset(&laarr[c].extLocation, 0x00,
			       sizeof(struct kernel_lb_addr));
			count++;
		}
		endnum = c + 1;
		lastblock = 1;
	} else {
		isBeyondEOF = false;
		endnum = startnum = ((count > 2) ? 2 : count);

		/* if the current extent is in position 0,
		   swap it with the previous */
		if (!c && count != 1) {
			laarr[2] = laarr[0];
			laarr[0] = laarr[1];
			laarr[1] = laarr[2];
			c = 1;
		}

		/* if the current block is located in an extent,
		   read the next extent */
		etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
		if (etype != -1) {
			laarr[c + 1].extLength = (etype << 30) | elen;
			laarr[c + 1].extLocation = eloc;
			count++;
			startnum++;
			endnum++;
		} else
			lastblock = 1;
	}

	/* if the current extent is not recorded but allocated, get the
	 * block in the extent corresponding to the requested block */
	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
	else { /* otherwise, allocate a new block */
		if (iinfo->i_next_alloc_block == block)
			goal = iinfo->i_next_alloc_goal;

		if (!goal) {
			if (!(goal = pgoal)) /* XXX: what was intended here? */
				goal = iinfo->i_location.logicalBlockNum + 1;
		}

		newblocknum = udf_new_block(inode->i_sb, inode,
				iinfo->i_location.partitionReferenceNum,
				goal, err);
		if (!newblocknum) {
			*err = -ENOSPC;
			newblock = 0;
			goto out_free;
		}
		if (isBeyondEOF)
			iinfo->i_lenExtents += inode->i_sb->s_blocksize;
	}

	/* if the extent the requested block is located in contains multiple
	 * blocks, split the extent into at most three extents. blocks prior
	 * to requested block, requested block, and blocks after requested
	 * block */
	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);

	/* We preallocate blocks only for regular files. It also makes sense
	 * for directories but there's a problem when to drop the
	 * preallocation. We might use some delayed work for that but I feel
	 * it's overengineering for a filesystem like UDF. */
	if (S_ISREG(inode->i_mode))
		udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);

	/* merge any continuous blocks in laarr */
	udf_merge_extents(inode, laarr, &endnum);

	/* write back the new extents, inserting new extents if the new number
	 * of extents is greater than the old number, and deleting extents if
	 * the new number of extents is less than the old number */
	udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);

	newblock = udf_get_pblock(inode->i_sb, newblocknum,
				iinfo->i_location.partitionReferenceNum, 0);
	if (!newblock) {
		*err = -EIO;
		goto out_free;
	}
	*new = 1;
	iinfo->i_next_alloc_block = block;
	iinfo->i_next_alloc_goal = newblocknum;
	inode->i_ctime = current_time(inode);

	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
out_free:
	brelse(prev_epos.bh);
	brelse(cur_epos.bh);
	brelse(next_epos.bh);
	return newblock;
}

static void udf_split_extents(struct inode *inode, int *c, int offset,
			      udf_pblk_t newblocknum,
			      struct kernel_long_ad *laarr, int *endnum)
{
	unsigned long blocksize = inode->i_sb->s_blocksize;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;

	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
	    (laarr[*c].extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
		int curr = *c;
		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
			    blocksize - 1) >> blocksize_bits;
		int8_t etype = (laarr[curr].extLength >> 30);

		if (blen == 1)
			;
		else if (!offset || blen == offset + 1) {
			laarr[curr + 2] = laarr[curr + 1];
			laarr[curr + 1] = laarr[curr];
		} else {
			laarr[curr + 3] = laarr[curr + 1];
			laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
		}

		if (offset) {
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
				udf_free_blocks(inode->i_sb, inode,
						&laarr[curr].extLocation,
						0, offset);
				laarr[curr].extLength =
					EXT_NOT_RECORDED_NOT_ALLOCATED |
					(offset << blocksize_bits);
				laarr[curr].extLocation.logicalBlockNum = 0;
				laarr[curr].extLocation.
						partitionReferenceNum = 0;
			} else
				laarr[curr].extLength = (etype << 30) |
					(offset << blocksize_bits);
			curr++;
			(*c)++;
			(*endnum)++;
		}

		laarr[curr].extLocation.logicalBlockNum = newblocknum;
		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			laarr[curr].extLocation.partitionReferenceNum =
				UDF_I(inode)->i_location.partitionReferenceNum;
		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
			blocksize;
		curr++;

		if (blen != offset + 1) {
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
				laarr[curr].extLocation.logicalBlockNum +=
								offset + 1;
			laarr[curr].extLength = (etype << 30) |
				((blen - (offset + 1)) << blocksize_bits);
			curr++;
			(*endnum)++;
		}
	}
}

static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
				 struct kernel_long_ad *laarr,
				 int *endnum)
{
	int start, length = 0, currlength = 0, i;

	if (*endnum >= (c + 1)) {
		if (!lastblock)
			return;
		else
			start = c;
	} else {
		if ((laarr[c + 1].extLength >> 30) ==
		    (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			start = c + 1;
			length = currlength =
				(((laarr[c + 1].extLength &
					UDF_EXTENT_LENGTH_MASK) +
				  inode->i_sb->s_blocksize - 1) >>
				 inode->i_sb->s_blocksize_bits);
		} else
			start = c;
	}

	for (i = start + 1; i <= *endnum; i++) {
		if (i == *endnum) {
			if (lastblock)
				length += UDF_DEFAULT_PREALLOC_BLOCKS;
		} else if ((laarr[i].extLength >> 30) ==
			   (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
			length += (((laarr[i].extLength &
						UDF_EXTENT_LENGTH_MASK) +
				    inode->i_sb->s_blocksize - 1) >>
				   inode->i_sb->s_blocksize_bits);
		} else
			break;
	}

	if (length) {
		int next = laarr[start].extLocation.logicalBlockNum +
			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
			  inode->i_sb->s_blocksize - 1) >>
			 inode->i_sb->s_blocksize_bits);
		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
				laarr[start].extLocation.partitionReferenceNum,
				next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
				length : UDF_DEFAULT_PREALLOC_BLOCKS) -
				currlength);
		if (numalloc) {
			if (start == (c + 1))
				laarr[start].extLength +=
					(numalloc <<
					 inode->i_sb->s_blocksize_bits);
			else {
				memmove(&laarr[c + 2], &laarr[c + 1],
					sizeof(struct long_ad) * (*endnum - (c + 1)));
				(*endnum)++;
				laarr[c + 1].extLocation.logicalBlockNum = next;
				laarr[c + 1].extLocation.partitionReferenceNum =
					laarr[c].extLocation.
							partitionReferenceNum;
				laarr[c + 1].extLength =
					EXT_NOT_RECORDED_ALLOCATED |
					(numalloc <<
					 inode->i_sb->s_blocksize_bits);
				start = c + 1;
			}

			for (i = start + 1; numalloc && i < *endnum; i++) {
				int elen = ((laarr[i].extLength &
						UDF_EXTENT_LENGTH_MASK) +
					    inode->i_sb->s_blocksize - 1) >>
					inode->i_sb->s_blocksize_bits;

				if (elen > numalloc) {
					laarr[i].extLength -=
						(numalloc <<
						 inode->i_sb->s_blocksize_bits);
					numalloc = 0;
				} else {
					numalloc -= elen;
					if (*endnum > (i + 1))
						memmove(&laarr[i],
							&laarr[i + 1],
							sizeof(struct long_ad) *
							(*endnum - (i + 1)));
					i--;
					(*endnum)--;
				}
			}
			UDF_I(inode)->i_lenExtents +=
				numalloc << inode->i_sb->s_blocksize_bits;
		}
	}
}

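/*
 * Merge neighbouring entries of laarr[] where possible: extents of the same
 * type that are physically adjacent (or both unallocated holes) are combined
 * as long as the merged length still fits in the 30-bit extent length field,
 * and *endnum is reduced accordingly.  Not-recorded-but-allocated extents
 * that cannot be kept have their blocks released via udf_free_blocks() and
 * become unallocated holes.
 */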
static void udf_merge_extents(struct inode *inode, struct kernel_long_ad *laarr,
			      int *endnum)
{
	int i;
	unsigned long blocksize = inode->i_sb->s_blocksize;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;

	for (i = 0; i < (*endnum - 1); i++) {
		struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
		struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];

		if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
		    (((li->extLength >> 30) ==
		      (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
		     ((lip1->extLocation.logicalBlockNum -
		       li->extLocation.logicalBlockNum) ==
			(((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			blocksize - 1) >> blocksize_bits)))) {

			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
			     blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
				lip1->extLength = (lip1->extLength -
						   (li->extLength &
						   UDF_EXTENT_LENGTH_MASK) +
						   UDF_EXTENT_LENGTH_MASK) &
						   ~(blocksize - 1);
				li->extLength = (li->extLength &
						 UDF_EXTENT_FLAG_MASK) +
						(UDF_EXTENT_LENGTH_MASK + 1) -
						blocksize;
				lip1->extLocation.logicalBlockNum =
					li->extLocation.logicalBlockNum +
					((li->extLength &
						UDF_EXTENT_LENGTH_MASK) >>
						blocksize_bits);
			} else {
				li->extLength = lip1->extLength +
					(((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					 blocksize - 1) & ~(blocksize - 1));
				if (*endnum > (i + 2))
					memmove(&laarr[i + 1], &laarr[i + 2],
						sizeof(struct long_ad) *
						(*endnum - (i + 2)));
				i--;
				(*endnum)--;
			}
		} else if (((li->extLength >> 30) ==
			    (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
			   ((lip1->extLength >> 30) ==
			    (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
			udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
					((li->extLength &
					  UDF_EXTENT_LENGTH_MASK) +
					 blocksize - 1) >> blocksize_bits);
			li->extLocation.logicalBlockNum = 0;
			li->extLocation.partitionReferenceNum = 0;

			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
			     blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
				lip1->extLength = (lip1->extLength -
						   (li->extLength &
						    UDF_EXTENT_LENGTH_MASK) +
						   UDF_EXTENT_LENGTH_MASK) &
						   ~(blocksize - 1);
				li->extLength = (li->extLength &
						 UDF_EXTENT_FLAG_MASK) +
						(UDF_EXTENT_LENGTH_MASK + 1) -
						blocksize;
			} else {
				li->extLength = lip1->extLength +
					(((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					  blocksize - 1) & ~(blocksize - 1));
				if (*endnum > (i + 2))
					memmove(&laarr[i + 1], &laarr[i + 2],
						sizeof(struct long_ad) *
						(*endnum - (i + 2)));
				i--;
				(*endnum)--;
			}
		} else if ((li->extLength >> 30) ==
			   (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			udf_free_blocks(inode->i_sb, inode,
					&li->extLocation, 0,
					((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					 blocksize - 1) >> blocksize_bits);
			li->extLocation.logicalBlockNum = 0;
			li->extLocation.partitionReferenceNum = 0;
			li->extLength = (li->extLength &
						UDF_EXTENT_LENGTH_MASK) |
						EXT_NOT_RECORDED_NOT_ALLOCATED;
		}
	}
}

static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr,
			       int startnum, int endnum,
			       struct extent_position *epos)
{
	int start = 0, i;
	struct kernel_lb_addr tmploc;
	uint32_t tmplen;

	if (startnum > endnum) {
		for (i = 0; i < (startnum - endnum); i++)
			udf_delete_aext(inode, *epos);
	} else if (startnum < endnum) {
		for (i = 0; i < (endnum - startnum); i++) {
			udf_insert_aext(inode, *epos, laarr[i].extLocation,
					laarr[i].extLength);
			udf_next_aext(inode, epos, &laarr[i].extLocation,
				      &laarr[i].extLength, 1);
			start++;
		}
	}

	for (i = start; i < endnum; i++) {
		udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
		udf_write_aext(inode, epos, &laarr[i].extLocation,
			       laarr[i].extLength, 1);
	}
}

struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
			      int create, int *err)
{
	struct buffer_head *bh = NULL;

	bh = udf_getblk(inode, block, create, err);
	if (!bh)
		return NULL;

	if (buffer_uptodate(bh))
		return bh;

	ll_rw_block(REQ_OP_READ, 0, 1, &bh);

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;

	brelse(bh);
	*err = -EIO;
	return NULL;
}

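/*
 * Change the size of the file: either expand it (switching an in-ICB file to
 * normal extent-based storage once it no longer fits inside the inode) or
 * shrink it, truncating extents and zeroing the tail of the last block.
 */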
int udf_setsize(struct inode *inode, loff_t newsize)
{
	int err;
	struct udf_inode_info *iinfo;
	unsigned int bsize = i_blocksize(inode);

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	iinfo = UDF_I(inode);
	if (newsize > inode->i_size) {
		down_write(&iinfo->i_data_sem);
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			if (bsize <
			    (udf_file_entry_alloc_offset(inode) + newsize)) {
				err = udf_expand_file_adinicb(inode);
				if (err)
					return err;
				down_write(&iinfo->i_data_sem);
			} else {
				iinfo->i_lenAlloc = newsize;
				goto set_size;
			}
		}
		err = udf_extend_file(inode, newsize);
		if (err) {
			up_write(&iinfo->i_data_sem);
			return err;
		}
set_size:
		up_write(&iinfo->i_data_sem);
		truncate_setsize(inode, newsize);
	} else {
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			down_write(&iinfo->i_data_sem);
			udf_clear_extent_cache(inode);
			memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr + newsize,
			       0x00, bsize - newsize -
			       udf_file_entry_alloc_offset(inode));
			iinfo->i_lenAlloc = newsize;
			truncate_setsize(inode, newsize);
			up_write(&iinfo->i_data_sem);
			goto update_time;
		}
		err = block_truncate_page(inode->i_mapping, newsize,
					  udf_get_block);
		if (err)
			return err;
		truncate_setsize(inode, newsize);
		down_write(&iinfo->i_data_sem);
		udf_clear_extent_cache(inode);
		err = udf_truncate_extents(inode);
		up_write(&iinfo->i_data_sem);
		if (err)
			return err;
	}
update_time:
	inode->i_mtime = inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	return 0;
}

/*
 * Maximum length of linked list formed by ICB hierarchy. The chosen number is
 * arbitrary - just that we hopefully don't limit any real use of rewritten
 * inode on write-once media but avoid looping for too long on corrupted media.
 */
#define UDF_MAX_ICB_NESTING 1024

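/*
 * Read the fileEntry / extendedFileEntry descriptor for this inode from disc,
 * following strategy-4096 indirect ICBs if necessary, validate it and fill in
 * the in-core inode: mode, ownership, size, timestamps, allocation type and
 * the in-memory copy of the allocation descriptors / in-ICB data.
 */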
static int udf_read_inode(struct inode *inode, bool hidden_inode)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint16_t ident;
	struct udf_inode_info *iinfo = UDF_I(inode);
	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
	struct kernel_lb_addr *iloc = &iinfo->i_location;
	unsigned int link_count;
	unsigned int indirections = 0;
	int bs = inode->i_sb->s_blocksize;
	int ret = -EIO;
	uint32_t uid, gid;

reread:
	if (iloc->partitionReferenceNum >= sbi->s_partitions) {
		udf_debug("partition reference: %u > logical volume partitions: %u\n",
			  iloc->partitionReferenceNum, sbi->s_partitions);
		return -EIO;
	}

	if (iloc->logicalBlockNum >=
	    sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
		udf_debug("block=%u, partition=%u out of range\n",
			  iloc->logicalBlockNum, iloc->partitionReferenceNum);
		return -EIO;
	}

	/*
	 * Set defaults, but the inode is still incomplete!
	 * Note: get_new_inode() sets the following on a new inode:
	 *      i_sb = sb
	 *      i_no = ino
	 *      i_flags = sb->s_flags
	 *      i_state = 0
	 * clean_inode(): zero fills and sets
	 *      i_count = 1
	 *      i_nlink = 1
	 *      i_op = NULL;
	 */
	bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
	if (!bh) {
		udf_err(inode->i_sb, "(ino %lu) failed !bh\n", inode->i_ino);
		return -EIO;
	}

	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
	    ident != TAG_IDENT_USE) {
		udf_err(inode->i_sb, "(ino %lu) failed ident=%u\n",
			inode->i_ino, ident);
		goto out;
	}

	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
		struct buffer_head *ibh;

		ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
		if (ident == TAG_IDENT_IE && ibh) {
			struct kernel_lb_addr loc;
			struct indirectEntry *ie;

			ie = (struct indirectEntry *)ibh->b_data;
			loc = lelb_to_cpu(ie->indirectICB.extLocation);

			if (ie->indirectICB.extLength) {
				brelse(ibh);
				memcpy(&iinfo->i_location, &loc,
				       sizeof(struct kernel_lb_addr));
				if (++indirections > UDF_MAX_ICB_NESTING) {
					udf_err(inode->i_sb,
						"too many ICBs in ICB hierarchy"
						" (max %d supported)\n",
						UDF_MAX_ICB_NESTING);
					goto out;
				}
				brelse(bh);
				goto reread;
			}
		}
		brelse(ibh);
	} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
		udf_err(inode->i_sb, "unsupported strategy type: %u\n",
			le16_to_cpu(fe->icbTag.strategyType));
		goto out;
	}
	if (fe->icbTag.strategyType == cpu_to_le16(4))
		iinfo->i_strat4096 = 0;
	else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
		iinfo->i_strat4096 = 1;

	iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
							ICBTAG_FLAG_AD_MASK;
	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_SHORT &&
	    iinfo->i_alloc_type != ICBTAG_FLAG_AD_LONG &&
	    iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
		ret = -EIO;
		goto out;
	}
	iinfo->i_unique = 0;
	iinfo->i_lenEAttr = 0;
	iinfo->i_lenExtents = 0;
	iinfo->i_lenAlloc = 0;
	iinfo->i_next_alloc_block = 0;
	iinfo->i_next_alloc_goal = 0;
	if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
		iinfo->i_efe = 1;
		iinfo->i_use = 0;
		ret = udf_alloc_i_data(inode, bs -
					sizeof(struct extendedFileEntry));
		if (ret)
			goto out;
		memcpy(iinfo->i_ext.i_data,
		       bh->b_data + sizeof(struct extendedFileEntry),
		       bs - sizeof(struct extendedFileEntry));
	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
		iinfo->i_efe = 0;
		iinfo->i_use = 0;
		ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
		if (ret)
			goto out;
		memcpy(iinfo->i_ext.i_data,
		       bh->b_data + sizeof(struct fileEntry),
		       bs - sizeof(struct fileEntry));
	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
		iinfo->i_efe = 0;
		iinfo->i_use = 1;
		iinfo->i_lenAlloc = le32_to_cpu(
				((struct unallocSpaceEntry *)bh->b_data)->
				 lengthAllocDescs);
		ret = udf_alloc_i_data(inode, bs -
					sizeof(struct unallocSpaceEntry));
		if (ret)
			goto out;
		memcpy(iinfo->i_ext.i_data,
		       bh->b_data + sizeof(struct unallocSpaceEntry),
		       bs - sizeof(struct unallocSpaceEntry));
		return 0;
	}

	ret = -EIO;
	read_lock(&sbi->s_cred_lock);
	uid = le32_to_cpu(fe->uid);
	if (uid == UDF_INVALID_ID ||
	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
		inode->i_uid = sbi->s_uid;
	else
		i_uid_write(inode, uid);

	gid = le32_to_cpu(fe->gid);
	if (gid == UDF_INVALID_ID ||
	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
		inode->i_gid = sbi->s_gid;
	else
		i_gid_write(inode, gid);

	if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
	    sbi->s_fmode != UDF_INVALID_MODE)
		inode->i_mode = sbi->s_fmode;
	else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
		 sbi->s_dmode != UDF_INVALID_MODE)
		inode->i_mode = sbi->s_dmode;
	else
		inode->i_mode = udf_convert_permissions(fe);
	inode->i_mode &= ~sbi->s_umask;
	iinfo->i_extraPerms = le32_to_cpu(fe->permissions) & ~FE_MAPPED_PERMS;

	read_unlock(&sbi->s_cred_lock);

	link_count = le16_to_cpu(fe->fileLinkCount);
	if (!link_count) {
		if (!hidden_inode) {
			ret = -ESTALE;
			goto out;
		}
		link_count = 1;
	}
	set_nlink(inode, link_count);

	inode->i_size = le64_to_cpu(fe->informationLength);
	iinfo->i_lenExtents = inode->i_size;

	if (iinfo->i_efe == 0) {
		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
			(inode->i_sb->s_blocksize_bits - 9);

		udf_disk_stamp_to_time(&inode->i_atime, fe->accessTime);
		udf_disk_stamp_to_time(&inode->i_mtime, fe->modificationTime);
		udf_disk_stamp_to_time(&inode->i_ctime, fe->attrTime);

		iinfo->i_unique = le64_to_cpu(fe->uniqueID);
		iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
		iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
		iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
		iinfo->i_streamdir = 0;
		iinfo->i_lenStreams = 0;
	} else {
		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
			(inode->i_sb->s_blocksize_bits - 9);

		udf_disk_stamp_to_time(&inode->i_atime, efe->accessTime);
		udf_disk_stamp_to_time(&inode->i_mtime, efe->modificationTime);
		udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime);
		udf_disk_stamp_to_time(&inode->i_ctime, efe->attrTime);

		iinfo->i_unique = le64_to_cpu(efe->uniqueID);
		iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
		iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
		iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);

		/* Named streams */
		iinfo->i_streamdir = (efe->streamDirectoryICB.extLength != 0);
		iinfo->i_locStreamdir =
			lelb_to_cpu(efe->streamDirectoryICB.extLocation);
		iinfo->i_lenStreams = le64_to_cpu(efe->objectSize);
		if (iinfo->i_lenStreams >= inode->i_size)
			iinfo->i_lenStreams -= inode->i_size;
		else
			iinfo->i_lenStreams = 0;
	}
	inode->i_generation = iinfo->i_unique;

	/*
	 * Sanity check length of allocation descriptors and extended attrs to
	 * avoid integer overflows
	 */
	if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
		goto out;
	/* Now do exact checks */
	if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
		goto out;
	/* Sanity checks for files in ICB so that we don't get confused later */
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
		/*
		 * For file in ICB data is stored in allocation descriptor
		 * so sizes should match
		 */
		if (iinfo->i_lenAlloc != inode->i_size)
			goto out;
		/* File in ICB has to fit in there... */
		if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
			goto out;
	}

	switch (fe->icbTag.fileType) {
	case ICBTAG_FILE_TYPE_DIRECTORY:
		inode->i_op = &udf_dir_inode_operations;
		inode->i_fop = &udf_dir_operations;
		inode->i_mode |= S_IFDIR;
		inc_nlink(inode);
		break;
	case ICBTAG_FILE_TYPE_REALTIME:
	case ICBTAG_FILE_TYPE_REGULAR:
	case ICBTAG_FILE_TYPE_UNDEF:
	case ICBTAG_FILE_TYPE_VAT20:
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
			inode->i_data.a_ops = &udf_adinicb_aops;
		else
			inode->i_data.a_ops = &udf_aops;
		inode->i_op = &udf_file_inode_operations;
		inode->i_fop = &udf_file_operations;
		inode->i_mode |= S_IFREG;
		break;
	case ICBTAG_FILE_TYPE_BLOCK:
		inode->i_mode |= S_IFBLK;
		break;
	case ICBTAG_FILE_TYPE_CHAR:
		inode->i_mode |= S_IFCHR;
		break;
	case ICBTAG_FILE_TYPE_FIFO:
		init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
		break;
	case ICBTAG_FILE_TYPE_SOCKET:
		init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
		break;
	case ICBTAG_FILE_TYPE_SYMLINK:
		inode->i_data.a_ops = &udf_symlink_aops;
		inode->i_op = &udf_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mode = S_IFLNK | 0777;
		break;
	case ICBTAG_FILE_TYPE_MAIN:
		udf_debug("METADATA FILE-----\n");
		break;
	case ICBTAG_FILE_TYPE_MIRROR:
		udf_debug("METADATA MIRROR FILE-----\n");
		break;
	case ICBTAG_FILE_TYPE_BITMAP:
		udf_debug("METADATA BITMAP FILE-----\n");
		break;
	default:
		udf_err(inode->i_sb, "(ino %lu) failed unknown file type=%u\n",
			inode->i_ino, fe->icbTag.fileType);
		goto out;
	}
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		struct deviceSpec *dsea =
			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
		if (dsea) {
			init_special_inode(inode, inode->i_mode,
				MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
				      le32_to_cpu(dsea->minorDeviceIdent)));
			/* Developer ID ??? */
		} else
			goto out;
	}
	ret = 0;
out:
	brelse(bh);
	return ret;
}

static int udf_alloc_i_data(struct inode *inode, size_t size)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	iinfo->i_ext.i_data = kmalloc(size, GFP_KERNEL);
	if (!iinfo->i_ext.i_data)
		return -ENOMEM;
	return 0;
}

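/*
 * Translate the on-disc permission bits (five bits per class, classes ordered
 * other/group/owner from the low bits, of which only read/write/execute are
 * mapped) plus the setuid/setgid/sticky icbTag flags into a POSIX mode.
 */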
static umode_t udf_convert_permissions(struct fileEntry *fe)
{
	umode_t mode;
	uint32_t permissions;
	uint32_t flags;

	permissions = le32_to_cpu(fe->permissions);
	flags = le16_to_cpu(fe->icbTag.flags);

	mode =	((permissions) & 0007) |
		((permissions >> 2) & 0070) |
		((permissions >> 4) & 0700) |
		((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
		((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
		((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);

	return mode;
}

void udf_update_extra_perms(struct inode *inode, umode_t mode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	/*
	 * UDF 2.01 sec. 3.3.3.3 Note 2:
	 * In Unix, delete permission tracks write
	 */
	iinfo->i_extraPerms &= ~FE_DELETE_PERMS;
	if (mode & 0200)
		iinfo->i_extraPerms |= FE_PERM_U_DELETE;
	if (mode & 0020)
		iinfo->i_extraPerms |= FE_PERM_G_DELETE;
	if (mode & 0002)
		iinfo->i_extraPerms |= FE_PERM_O_DELETE;
}

int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

static int udf_sync_inode(struct inode *inode)
{
	return udf_update_inode(inode, 1);
}

static void udf_adjust_time(struct udf_inode_info *iinfo, struct timespec64 time)
{
	if (iinfo->i_crtime.tv_sec > time.tv_sec ||
	    (iinfo->i_crtime.tv_sec == time.tv_sec &&
	     iinfo->i_crtime.tv_nsec > time.tv_nsec))
		iinfo->i_crtime = time;
}

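/*
 * Write the in-core inode back into its fileEntry / extendedFileEntry (or
 * unallocSpaceEntry) descriptor block, recomputing the tag checksum and CRC,
 * and optionally wait for the buffer when @do_sync is set.
 */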
static int udf_update_inode(struct inode *inode, int do_sync)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint64_t lb_recorded;
	uint32_t udfperms;
	uint16_t icbflags;
	uint16_t crclen;
	int err = 0;
	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	struct udf_inode_info *iinfo = UDF_I(inode);

	bh = udf_tgetblk(inode->i_sb,
			udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
	if (!bh) {
		udf_debug("getblk failure\n");
		return -EIO;
	}

	lock_buffer(bh);
	memset(bh->b_data, 0, inode->i_sb->s_blocksize);
	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (iinfo->i_use) {
		struct unallocSpaceEntry *use =
			(struct unallocSpaceEntry *)bh->b_data;

		use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
		       iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
					sizeof(struct unallocSpaceEntry));
		use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
		crclen = sizeof(struct unallocSpaceEntry);

		goto finish;
	}

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
		fe->uid = cpu_to_le32(UDF_INVALID_ID);
	else
		fe->uid = cpu_to_le32(i_uid_read(inode));

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
		fe->gid = cpu_to_le32(UDF_INVALID_ID);
	else
		fe->gid = cpu_to_le32(i_gid_read(inode));

	udfperms = ((inode->i_mode & 0007)) |
		   ((inode->i_mode & 0070) << 2) |
		   ((inode->i_mode & 0700) << 4);

	udfperms |= iinfo->i_extraPerms;
	fe->permissions = cpu_to_le32(udfperms);

	if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
	else
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);

	fe->informationLength = cpu_to_le64(inode->i_size);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		struct regid *eid;
		struct deviceSpec *dsea =
			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
		if (!dsea) {
			dsea = (struct deviceSpec *)
				udf_add_extendedattr(inode,
						     sizeof(struct deviceSpec) +
						     sizeof(struct regid), 12, 0x3);
			dsea->attrType = cpu_to_le32(12);
			dsea->attrSubtype = 1;
			dsea->attrLength = cpu_to_le32(
						sizeof(struct deviceSpec) +
						sizeof(struct regid));
			dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
		}
		eid = (struct regid *)dsea->impUse;
		memset(eid, 0, sizeof(*eid));
		strcpy(eid->ident, UDF_ID_DEVELOPER);
		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
		eid->identSuffix[1] = UDF_OS_ID_LINUX;
		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
	}

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		lb_recorded = 0; /* No extents => no blocks! */
	else
		lb_recorded =
			(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
			(blocksize_bits - 9);

	if (iinfo->i_efe == 0) {
		memcpy(bh->b_data + sizeof(struct fileEntry),
		       iinfo->i_ext.i_data,
		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
		fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);

		udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
		udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
		udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
		memset(&(fe->impIdent), 0, sizeof(struct regid));
		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		fe->uniqueID = cpu_to_le64(iinfo->i_unique);
		fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
		fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
		crclen = sizeof(struct fileEntry);
	} else {
		memcpy(bh->b_data + sizeof(struct extendedFileEntry),
		       iinfo->i_ext.i_data,
		       inode->i_sb->s_blocksize -
					sizeof(struct extendedFileEntry));
		efe->objectSize =
			cpu_to_le64(inode->i_size + iinfo->i_lenStreams);
		efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);

		if (iinfo->i_streamdir) {
			struct long_ad *icb_lad = &efe->streamDirectoryICB;

			icb_lad->extLocation =
				cpu_to_lelb(iinfo->i_locStreamdir);
			icb_lad->extLength =
				cpu_to_le32(inode->i_sb->s_blocksize);
		}

		udf_adjust_time(iinfo, inode->i_atime);
		udf_adjust_time(iinfo, inode->i_mtime);
		udf_adjust_time(iinfo, inode->i_ctime);

		udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
		udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
		udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
		udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);

		memset(&(efe->impIdent), 0, sizeof(efe->impIdent));
		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		efe->uniqueID = cpu_to_le64(iinfo->i_unique);
		efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
		efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
		crclen = sizeof(struct extendedFileEntry);
	}
static int udf_update_inode(struct inode *inode, int do_sync)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint64_t lb_recorded;
	uint32_t udfperms;
	uint16_t icbflags;
	uint16_t crclen;
	int err = 0;
	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	struct udf_inode_info *iinfo = UDF_I(inode);

	bh = udf_tgetblk(inode->i_sb,
			 udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
	if (!bh) {
		udf_debug("getblk failure\n");
		return -EIO;
	}

	lock_buffer(bh);
	memset(bh->b_data, 0, inode->i_sb->s_blocksize);
	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (iinfo->i_use) {
		struct unallocSpaceEntry *use =
			(struct unallocSpaceEntry *)bh->b_data;

		use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
		       iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
					sizeof(struct unallocSpaceEntry));
		use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
		crclen = sizeof(struct unallocSpaceEntry);

		goto finish;
	}

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
		fe->uid = cpu_to_le32(UDF_INVALID_ID);
	else
		fe->uid = cpu_to_le32(i_uid_read(inode));

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
		fe->gid = cpu_to_le32(UDF_INVALID_ID);
	else
		fe->gid = cpu_to_le32(i_gid_read(inode));

	udfperms = ((inode->i_mode & 0007)) |
		   ((inode->i_mode & 0070) << 2) |
		   ((inode->i_mode & 0700) << 4);

	udfperms |= iinfo->i_extraPerms;
	fe->permissions = cpu_to_le32(udfperms);

	if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
	else
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);

	fe->informationLength = cpu_to_le64(inode->i_size);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		struct regid *eid;
		struct deviceSpec *dsea =
			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
		if (!dsea) {
			dsea = (struct deviceSpec *)
				udf_add_extendedattr(inode,
						     sizeof(struct deviceSpec) +
						     sizeof(struct regid), 12, 0x3);
			dsea->attrType = cpu_to_le32(12);
			dsea->attrSubtype = 1;
			dsea->attrLength = cpu_to_le32(
						sizeof(struct deviceSpec) +
						sizeof(struct regid));
			dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
		}
		eid = (struct regid *)dsea->impUse;
		memset(eid, 0, sizeof(*eid));
		strcpy(eid->ident, UDF_ID_DEVELOPER);
		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
		eid->identSuffix[1] = UDF_OS_ID_LINUX;
		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
	}

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		lb_recorded = 0; /* No extents => no blocks! */
	else
		lb_recorded =
			(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
			(blocksize_bits - 9);

	if (iinfo->i_efe == 0) {
		memcpy(bh->b_data + sizeof(struct fileEntry),
		       iinfo->i_ext.i_data,
		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
		fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);

		udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
		udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
		udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
		memset(&(fe->impIdent), 0, sizeof(struct regid));
		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		fe->uniqueID = cpu_to_le64(iinfo->i_unique);
		fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
		fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
		crclen = sizeof(struct fileEntry);
	} else {
		memcpy(bh->b_data + sizeof(struct extendedFileEntry),
		       iinfo->i_ext.i_data,
		       inode->i_sb->s_blocksize -
					sizeof(struct extendedFileEntry));
		efe->objectSize =
			cpu_to_le64(inode->i_size + iinfo->i_lenStreams);
		efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);

		if (iinfo->i_streamdir) {
			struct long_ad *icb_lad = &efe->streamDirectoryICB;

			icb_lad->extLocation =
				cpu_to_lelb(iinfo->i_locStreamdir);
			icb_lad->extLength =
				cpu_to_le32(inode->i_sb->s_blocksize);
		}

		udf_adjust_time(iinfo, inode->i_atime);
		udf_adjust_time(iinfo, inode->i_mtime);
		udf_adjust_time(iinfo, inode->i_ctime);

		udf_time_to_disk_stamp(&efe->accessTime, inode->i_atime);
		udf_time_to_disk_stamp(&efe->modificationTime, inode->i_mtime);
		udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
		udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);

		memset(&(efe->impIdent), 0, sizeof(efe->impIdent));
		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		efe->uniqueID = cpu_to_le64(iinfo->i_unique);
		efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
		efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
		crclen = sizeof(struct extendedFileEntry);
	}

finish:
	if (iinfo->i_strat4096) {
		fe->icbTag.strategyType = cpu_to_le16(4096);
		fe->icbTag.strategyParameter = cpu_to_le16(1);
		fe->icbTag.numEntries = cpu_to_le16(2);
	} else {
		fe->icbTag.strategyType = cpu_to_le16(4);
		fe->icbTag.numEntries = cpu_to_le16(1);
	}

	if (iinfo->i_use)
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE;
	else if (S_ISDIR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
	else if (S_ISREG(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
	else if (S_ISLNK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
	else if (S_ISBLK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
	else if (S_ISCHR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
	else if (S_ISFIFO(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
	else if (S_ISSOCK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;

	icbflags =	iinfo->i_alloc_type |
			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
			(le16_to_cpu(fe->icbTag.flags) &
				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));

	fe->icbTag.flags = cpu_to_le16(icbflags);
	if (sbi->s_udfrev >= 0x0200)
		fe->descTag.descVersion = cpu_to_le16(3);
	else
		fe->descTag.descVersion = cpu_to_le16(2);
	fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
	fe->descTag.tagLocation = cpu_to_le32(
					iinfo->i_location.logicalBlockNum);
	crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag);
	fe->descTag.descCRCLength = cpu_to_le16(crclen);
	fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag),
						    crclen));
	fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	/* write the data blocks */
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_write_io_error(bh)) {
			udf_warn(inode->i_sb, "IO error syncing udf inode [%08lx]\n",
				 inode->i_ino);
			err = -EIO;
		}
	}
	brelse(bh);

	return err;
}
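
/*
 * Get the inode at the given on-disc location, reading and setting it up
 * with udf_read_inode() if it is not already present in the inode cache.
 * Returns the inode or an ERR_PTR() value on failure.
 */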
struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
			 bool hidden_inode)
{
	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
	struct inode *inode = iget_locked(sb, block);
	int err;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW))
		return inode;

	memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
	err = udf_read_inode(inode, hidden_inode);
	if (err < 0) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);

	return inode;
}
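
/*
 * Turn the block at 'block' into a fresh allocation extent descriptor and
 * chain it after the current extent position.  If the old descriptor has
 * no room left for the indirect pointer, its last extent is moved into the
 * new block first.  On success *epos points at the first free slot of the
 * new descriptor.
 */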
int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
			    struct extent_position *epos)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	struct allocExtDesc *aed;
	struct extent_position nepos;
	struct kernel_lb_addr neloc;
	int ver, adsize;

	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return -EIO;

	neloc.logicalBlockNum = block;
	neloc.partitionReferenceNum = epos->block.partitionReferenceNum;

	bh = udf_tgetblk(sb, udf_get_lb_pblock(sb, &neloc, 0));
	if (!bh)
		return -EIO;
	lock_buffer(bh);
	memset(bh->b_data, 0x00, sb->s_blocksize);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty_inode(bh, inode);

	aed = (struct allocExtDesc *)(bh->b_data);
	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT)) {
		aed->previousAllocExtLocation =
				cpu_to_le32(epos->block.logicalBlockNum);
	}
	aed->lengthAllocDescs = cpu_to_le32(0);
	if (UDF_SB(sb)->s_udfrev >= 0x0200)
		ver = 3;
	else
		ver = 2;
	udf_new_tag(bh->b_data, TAG_IDENT_AED, ver, 1, block,
		    sizeof(struct tag));

	nepos.block = neloc;
	nepos.offset = sizeof(struct allocExtDesc);
	nepos.bh = bh;

	/*
	 * Do we have to copy current last extent to make space for indirect
	 * one?
	 */
	if (epos->offset + adsize > sb->s_blocksize) {
		struct kernel_lb_addr cp_loc;
		uint32_t cp_len;
		int cp_type;

		epos->offset -= adsize;
		cp_type = udf_current_aext(inode, epos, &cp_loc, &cp_len, 0);
		cp_len |= ((uint32_t)cp_type) << 30;

		__udf_add_aext(inode, &nepos, &cp_loc, cp_len, 1);
		udf_write_aext(inode, epos, &nepos.block,
			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS, 0);
	} else {
		__udf_add_aext(inode, epos, &nepos.block,
			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS, 0);
	}

	brelse(epos->bh);
	*epos = nepos;

	return 0;
}

/*
 * Append extent at the given position - should be the first free one in inode
 * / indirect extent. This function assumes there is enough space in the inode
 * or indirect extent. Use udf_add_aext() if you didn't check for this before.
 */
int __udf_add_aext(struct inode *inode, struct extent_position *epos,
		   struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	struct allocExtDesc *aed;
	int adsize;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return -EIO;

	if (!epos->bh) {
		WARN_ON(iinfo->i_lenAlloc !=
			epos->offset - udf_file_entry_alloc_offset(inode));
	} else {
		aed = (struct allocExtDesc *)epos->bh->b_data;
		WARN_ON(le32_to_cpu(aed->lengthAllocDescs) !=
			epos->offset - sizeof(struct allocExtDesc));
		WARN_ON(epos->offset + adsize > inode->i_sb->s_blocksize);
	}

	udf_write_aext(inode, epos, eloc, elen, inc);

	if (!epos->bh) {
		iinfo->i_lenAlloc += adsize;
		mark_inode_dirty(inode);
	} else {
		aed = (struct allocExtDesc *)epos->bh->b_data;
		le32_add_cpu(&aed->lengthAllocDescs, adsize);
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
			udf_update_tag(epos->bh->b_data,
				       epos->offset + (inc ? 0 : adsize));
		else
			udf_update_tag(epos->bh->b_data,
				       sizeof(struct allocExtDesc));
		mark_buffer_dirty_inode(epos->bh, inode);
	}

	return 0;
}

/*
 * Append extent at given position - should be the first free one in inode
 * / indirect extent. Takes care of allocating and linking indirect blocks.
 */
int udf_add_aext(struct inode *inode, struct extent_position *epos,
		 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	int adsize;
	struct super_block *sb = inode->i_sb;

	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return -EIO;

	if (epos->offset + (2 * adsize) > sb->s_blocksize) {
		int err;
		udf_pblk_t new_block;

		new_block = udf_new_block(sb, NULL,
					  epos->block.partitionReferenceNum,
					  epos->block.logicalBlockNum, &err);
		if (!new_block)
			return -ENOSPC;

		err = udf_setup_indirect_aext(inode, new_block, epos);
		if (err)
			return err;
	}

	return __udf_add_aext(inode, epos, eloc, elen, inc);
}
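
/*
 * Write one allocation descriptor (a short_ad or long_ad, depending on the
 * inode's allocation type) at the current extent position, dirty the buffer
 * or inode holding it, and advance the position past the descriptor when
 * 'inc' is set.
 */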
void udf_write_aext(struct inode *inode, struct extent_position *epos,
		    struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	int adsize;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh)
		ptr = iinfo->i_ext.i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
	else
		ptr = epos->bh->b_data + epos->offset;

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = (struct short_ad *)ptr;
		sad->extLength = cpu_to_le32(elen);
		sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
		adsize = sizeof(struct short_ad);
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = (struct long_ad *)ptr;
		lad->extLength = cpu_to_le32(elen);
		lad->extLocation = cpu_to_lelb(*eloc);
		memset(lad->impUse, 0x00, sizeof(lad->impUse));
		adsize = sizeof(struct long_ad);
		break;
	default:
		return;
	}

	if (epos->bh) {
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
			struct allocExtDesc *aed =
				(struct allocExtDesc *)epos->bh->b_data;
			udf_update_tag(epos->bh->b_data,
				       le32_to_cpu(aed->lengthAllocDescs) +
				       sizeof(struct allocExtDesc));
		}
		mark_buffer_dirty_inode(epos->bh, inode);
	} else {
		mark_inode_dirty(inode);
	}

	if (inc)
		epos->offset += adsize;
}

/*
 * Only 1 indirect extent in a row really makes sense but allow up to 16 in
 * case someone does some weird stuff.
 */
#define UDF_MAX_INDIR_EXTS 16

int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
		     struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int8_t etype;
	unsigned int indirections = 0;

	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
	       (EXT_NEXT_EXTENT_ALLOCDESCS >> 30)) {
		udf_pblk_t block;

		if (++indirections > UDF_MAX_INDIR_EXTS) {
			udf_err(inode->i_sb,
				"too many indirect extents in inode %lu\n",
				inode->i_ino);
			return -1;
		}

		epos->block = *eloc;
		epos->offset = sizeof(struct allocExtDesc);
		brelse(epos->bh);
		block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
		epos->bh = udf_tread(inode->i_sb, block);
		if (!epos->bh) {
			udf_debug("reading block %u failed!\n", block);
			return -1;
		}
	}

	return etype;
}
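
/*
 * Return the type of the allocation descriptor at the current extent
 * position and fill in its location and length, advancing the position
 * when 'inc' is set.  Unlike udf_next_aext() above, this does not follow
 * pointers to further allocation extent descriptors.
 */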
int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
			struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
	int alen;
	int8_t etype;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh) {
		if (!epos->offset)
			epos->offset = udf_file_entry_alloc_offset(inode);
		ptr = iinfo->i_ext.i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
		alen = udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenAlloc;
	} else {
		if (!epos->offset)
			epos->offset = sizeof(struct allocExtDesc);
		ptr = epos->bh->b_data + epos->offset;
		alen = sizeof(struct allocExtDesc) +
			le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
							lengthAllocDescs);
	}

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
		if (!sad)
			return -1;
		etype = le32_to_cpu(sad->extLength) >> 30;
		eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
		eloc->partitionReferenceNum =
				iinfo->i_location.partitionReferenceNum;
		*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
		if (!lad)
			return -1;
		etype = le32_to_cpu(lad->extLength) >> 30;
		*eloc = lelb_to_cpu(lad->extLocation);
		*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
		break;
	default:
		udf_debug("alloc_type = %u unsupported\n", iinfo->i_alloc_type);
		return -1;
	}

	return etype;
}

static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
			      struct kernel_lb_addr neloc, uint32_t nelen)
{
	struct kernel_lb_addr oeloc;
	uint32_t oelen;
	int8_t etype;

	if (epos.bh)
		get_bh(epos.bh);

	while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
		udf_write_aext(inode, &epos, &neloc, nelen, 1);
		neloc = oeloc;
		nelen = (etype << 30) | oelen;
	}
	udf_add_aext(inode, &epos, &neloc, nelen, 1);
	brelse(epos.bh);

	return (nelen >> 30);
}

int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
{
	struct extent_position oepos;
	int adsize;
	int8_t etype;
	struct allocExtDesc *aed;
	struct udf_inode_info *iinfo;
	struct kernel_lb_addr eloc;
	uint32_t elen;

	if (epos.bh) {
		get_bh(epos.bh);
		get_bh(epos.bh);
	}

	iinfo = UDF_I(inode);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		adsize = 0;

	oepos = epos;
	if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
		return -1;

	while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
		udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
		if (oepos.bh != epos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = epos.offset - adsize;
		}
	}
	memset(&eloc, 0x00, sizeof(struct kernel_lb_addr));
	elen = 0;

	if (epos.bh != oepos.bh) {
		udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		if (!oepos.bh) {
			iinfo->i_lenAlloc -= (adsize * 2);
			mark_inode_dirty(inode);
		} else {
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(oepos.bh->b_data,
					       oepos.offset - (2 * adsize));
			else
				udf_update_tag(oepos.bh->b_data,
					       sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	} else {
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		if (!oepos.bh) {
			iinfo->i_lenAlloc -= adsize;
			mark_inode_dirty(inode);
		} else {
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			le32_add_cpu(&aed->lengthAllocDescs, -adsize);
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(oepos.bh->b_data,
					       epos.offset - adsize);
			else
				udf_update_tag(oepos.bh->b_data,
					       sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

	return (elen >> 30);
}
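
/*
 * Map a file block to the extent containing it: starting from the cached
 * extent position (or the beginning of the allocation descriptors), walk
 * the extents until 'block' is covered.  Returns the extent type, fills in
 * its location and length and sets *offset to the block's offset within
 * that extent; returns -1 if the block lies beyond the last extent.
 */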
int8_t inode_bmap(struct inode *inode, sector_t block,
		  struct extent_position *pos, struct kernel_lb_addr *eloc,
		  uint32_t *elen, sector_t *offset)
{
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits;
	int8_t etype;
	struct udf_inode_info *iinfo;

	iinfo = UDF_I(inode);
	if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
		pos->offset = 0;
		pos->block = iinfo->i_location;
		pos->bh = NULL;
	}
	*elen = 0;
	do {
		etype = udf_next_aext(inode, pos, eloc, elen, 1);
		if (etype == -1) {
			*offset = (bcount - lbcount) >> blocksize_bits;
			iinfo->i_lenExtents = lbcount;
			return -1;
		}
		lbcount += *elen;
	} while (lbcount <= bcount);
	/* update extent cache */
	udf_update_extent_cache(inode, lbcount - *elen, pos);
	*offset = (bcount + *elen - lbcount) >> blocksize_bits;

	return etype;
}

udf_pblk_t udf_block_map(struct inode *inode, sector_t block)
{
	struct kernel_lb_addr eloc;
	uint32_t elen;
	sector_t offset;
	struct extent_position epos = {};
	udf_pblk_t ret;

	down_read(&UDF_I(inode)->i_data_sem);

	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
			(EXT_RECORDED_ALLOCATED >> 30))
		ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
	else
		ret = 0;

	up_read(&UDF_I(inode)->i_data_sem);
	brelse(epos.bh);

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
		return udf_fixed_to_variable(ret);
	else
		return ret;
}