// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/hfsplus/inode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/uio.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
#include "xattr.h"

static int hfsplus_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, hfsplus_get_block);
}

static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, hfsplus_get_block, wbc);
}

static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		hfsplus_file_truncate(inode);
	}
}

static int hfsplus_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				hfsplus_get_block,
				&HFSPLUS_I(mapping->host)->phys_size);
	if (unlikely(ret))
		hfsplus_write_failed(mapping, pos + len);

	return ret;
}

static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hfsplus_get_block);
}

static int hfsplus_releasepage(struct page *page, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		tree = HFSPLUS_SB(sb)->ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		tree = HFSPLUS_SB(sb)->cat_tree;
		break;
	case HFSPLUS_ATTR_CNID:
		tree = HFSPLUS_SB(sb)->attr_tree;
		break;
	default:
		BUG();
		return 0;
	}
	if (!tree)
		return 0;
	if (tree->node_size >= PAGE_SIZE) {
		nidx = page->index >>
			(tree->node_size_shift - PAGE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (!node)
			;
		else if (atomic_read(&node->refcnt))
			res = 0;
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		nidx = page->index <<
			(PAGE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ? try_to_free_buffers(page) : 0;
}

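/*
 * Direct I/O for both reads and writes is handed to the generic
 * blockdev_direct_IO() helper, using hfsplus_get_block() for mapping.
 */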
static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, hfsplus_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = iocb->ki_pos + count;

		if (end > isize)
			hfsplus_write_failed(mapping, end);
	}

	return ret;
}

static int hfsplus_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hfsplus_get_block);
}

const struct address_space_operations hfsplus_btree_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.releasepage	= hfsplus_releasepage,
};

const struct address_space_operations hfsplus_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.write_begin	= hfsplus_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfsplus_bmap,
	.direct_IO	= hfsplus_direct_IO,
	.writepages	= hfsplus_writepages,
};

const struct dentry_operations hfsplus_dentry_operations = {
	.d_hash		= hfsplus_hash_dentry,
	.d_compare	= hfsplus_compare_dentry,
};

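/*
 * Derive the in-core uid/gid/mode from the on-disk permission record,
 * falling back to the mount-time uid/gid/umask defaults when the
 * catalog entry carries no mode information.
 */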
static void hfsplus_get_perms(struct inode *inode,
		struct hfsplus_perm *perms, int dir)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	u16 mode;

	mode = be16_to_cpu(perms->mode);

	i_uid_write(inode, be32_to_cpu(perms->owner));
	if (!i_uid_read(inode) && !mode)
		inode->i_uid = sbi->uid;

	i_gid_write(inode, be32_to_cpu(perms->group));
	if (!i_gid_read(inode) && !mode)
		inode->i_gid = sbi->gid;

	if (dir) {
		mode = mode ? (mode & S_IALLUGO) : (S_IRWXUGO & ~(sbi->umask));
		mode |= S_IFDIR;
	} else if (!mode)
		mode = S_IFREG | ((S_IRUGO|S_IWUGO) & ~(sbi->umask));
	inode->i_mode = mode;

	HFSPLUS_I(inode)->userflags = perms->userflags;
	if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (perms->rootflags & HFSPLUS_FLG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
}

static int hfsplus_file_open(struct inode *inode, struct file *file)
{
	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode)->rsrc_inode;
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EOVERFLOW;
	atomic_inc(&HFSPLUS_I(inode)->opencnt);
	return 0;
}

static int hfsplus_file_release(struct inode *inode, struct file *file)
{
	struct super_block *sb = inode->i_sb;

	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode)->rsrc_inode;
	if (atomic_dec_and_test(&HFSPLUS_I(inode)->opencnt)) {
		inode_lock(inode);
		hfsplus_file_truncate(inode);
		if (inode->i_flags & S_DEAD) {
			hfsplus_delete_cat(inode->i_ino,
					   HFSPLUS_SB(sb)->hidden_dir, NULL);
			hfsplus_delete_inode(inode);
		}
		inode_unlock(inode);
	}
	return 0;
}

static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error;

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		if (attr->ia_size > inode->i_size) {
			error = generic_cont_expand_simple(inode,
							   attr->ia_size);
			if (error)
				return error;
		}
		truncate_setsize(inode, attr->ia_size);
		hfsplus_file_truncate(inode);
		inode->i_mtime = inode->i_ctime = current_time(inode);
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);

	return 0;
}

int hfsplus_getattr(const struct path *path, struct kstat *stat,
		    u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);

	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (hip->userflags & HFSPLUS_FLG_NODUMP)
		stat->attributes |= STATX_ATTR_NODUMP;

	stat->attributes_mask |= STATX_ATTR_APPEND | STATX_ATTR_IMMUTABLE |
				 STATX_ATTR_NODUMP;

	generic_fillattr(inode, stat);
	return 0;
}

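/*
 * fsync: flush the file's own pages and metadata first, then write out
 * whichever special b-tree files (catalog, extents, attributes) and the
 * allocation file this inode has dirtied, and finish with a cache flush
 * unless barriers are disabled.
 */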
int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
		       int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	int error = 0, error2;

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;
	inode_lock(inode);

	/*
	 * Sync inode metadata into the catalog and extent trees.
	 */
	sync_inode_metadata(inode, 1);

	/*
	 * And explicitly write out the btrees.
	 */
	if (test_and_clear_bit(HFSPLUS_I_CAT_DIRTY, &hip->flags))
		error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);

	if (test_and_clear_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags)) {
		error2 =
			filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
		if (!error)
			error = error2;
	}

	if (test_and_clear_bit(HFSPLUS_I_ATTR_DIRTY, &hip->flags)) {
		if (sbi->attr_tree) {
			error2 =
				filemap_write_and_wait(
					sbi->attr_tree->inode->i_mapping);
			if (!error)
				error = error2;
		} else {
			pr_err("sync non-existent attributes tree\n");
		}
	}

	if (test_and_clear_bit(HFSPLUS_I_ALLOC_DIRTY, &hip->flags)) {
		error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
		if (!error)
			error = error2;
	}

	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);

	inode_unlock(inode);

	return error;
}

static const struct inode_operations hfsplus_file_inode_operations = {
	.setattr	= hfsplus_setattr,
	.getattr	= hfsplus_getattr,
	.listxattr	= hfsplus_listxattr,
};

static const struct file_operations hfsplus_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= hfsplus_file_fsync,
	.open		= hfsplus_file_open,
	.release	= hfsplus_file_release,
	.unlocked_ioctl = hfsplus_ioctl,
};

struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
				umode_t mode)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct inode *inode = new_inode(sb);
	struct hfsplus_inode_info *hip;

	if (!inode)
		return NULL;

	inode->i_ino = sbi->next_cnid++;
	inode_init_owner(inode, dir, mode);
	set_nlink(inode, 1);
	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);

	hip = HFSPLUS_I(inode);
	INIT_LIST_HEAD(&hip->open_dir_list);
	spin_lock_init(&hip->open_dir_lock);
	mutex_init(&hip->extents_lock);
	atomic_set(&hip->opencnt, 0);
	hip->extent_state = 0;
	hip->flags = 0;
	hip->userflags = 0;
	hip->subfolders = 0;
	memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->alloc_blocks = 0;
	hip->first_blocks = 0;
	hip->cached_start = 0;
	hip->cached_blocks = 0;
	hip->phys_size = 0;
	hip->fs_blocks = 0;
	hip->rsrc_inode = NULL;
	if (S_ISDIR(inode->i_mode)) {
		inode->i_size = 2;
		sbi->folder_count++;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (S_ISREG(inode->i_mode)) {
		sbi->file_count++;
		inode->i_op = &hfsplus_file_inode_operations;
		inode->i_fop = &hfsplus_file_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		hip->clump_blocks = sbi->data_clump_blocks;
	} else if (S_ISLNK(inode->i_mode)) {
		sbi->file_count++;
		inode->i_op = &page_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &hfsplus_aops;
		hip->clump_blocks = 1;
	} else
		sbi->file_count++;
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	hfsplus_mark_mdb_dirty(sb);

	return inode;
}

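/*
 * Drop the volume's folder/file count for a deleted inode and free any
 * blocks still allocated to an unlinked regular file or a symlink.
 */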
void hfsplus_delete_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (S_ISDIR(inode->i_mode)) {
		HFSPLUS_SB(sb)->folder_count--;
		hfsplus_mark_mdb_dirty(sb);
		return;
	}
	HFSPLUS_SB(sb)->file_count--;
	if (S_ISREG(inode->i_mode)) {
		if (!inode->i_nlink) {
			inode->i_size = 0;
			hfsplus_file_truncate(inode);
		}
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_size = 0;
		hfsplus_file_truncate(inode);
	}
	hfsplus_mark_mdb_dirty(sb);
}

void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 count;
	int i;

	memcpy(&hip->first_extents, &fork->extents, sizeof(hfsplus_extent_rec));
	for (count = 0, i = 0; i < 8; i++)
		count += be32_to_cpu(fork->extents[i].block_count);
	hip->first_blocks = count;
	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->cached_start = 0;
	hip->cached_blocks = 0;

	hip->alloc_blocks = be32_to_cpu(fork->total_blocks);
	hip->phys_size = inode->i_size = be64_to_cpu(fork->total_size);
	hip->fs_blocks =
		(inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
	hip->clump_blocks =
		be32_to_cpu(fork->clump_size) >> sbi->alloc_blksz_shift;
	if (!hip->clump_blocks) {
		hip->clump_blocks = HFSPLUS_IS_RSRC(inode) ?
			sbi->rsrc_clump_blocks :
			sbi->data_clump_blocks;
	}
}

void hfsplus_inode_write_fork(struct inode *inode,
		struct hfsplus_fork_raw *fork)
{
	memcpy(&fork->extents, &HFSPLUS_I(inode)->first_extents,
	       sizeof(hfsplus_extent_rec));
	fork->total_size = cpu_to_be64(inode->i_size);
	fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode)->alloc_blocks);
}

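/*
 * Initialize an in-core inode from the catalog record (folder or file)
 * that the caller has already located via @fd.
 */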
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
	hfsplus_cat_entry entry;
	int res = 0;
	u16 type;

	type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

	HFSPLUS_I(inode)->linkid = 0;
	if (type == HFSPLUS_FOLDER) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_folder));
		hfsplus_get_perms(inode, &folder->permissions, 1);
		set_nlink(inode, 1);
		inode->i_size = 2 + be32_to_cpu(folder->valence);
		inode->i_atime = timespec_to_timespec64(hfsp_mt2ut(folder->access_date));
		inode->i_mtime = timespec_to_timespec64(hfsp_mt2ut(folder->content_mod_date));
		inode->i_ctime = timespec_to_timespec64(hfsp_mt2ut(folder->attribute_mod_date));
		HFSPLUS_I(inode)->create_date = folder->create_date;
		HFSPLUS_I(inode)->fs_blocks = 0;
		if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
			HFSPLUS_I(inode)->subfolders =
				be32_to_cpu(folder->subfolders);
		}
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (type == HFSPLUS_FILE) {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd->entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_file));

		hfsplus_inode_read_fork(inode, HFSPLUS_IS_RSRC(inode) ?
					&file->rsrc_fork : &file->data_fork);
		hfsplus_get_perms(inode, &file->permissions, 0);
		set_nlink(inode, 1);
		if (S_ISREG(inode->i_mode)) {
			if (file->permissions.dev)
				set_nlink(inode,
					  be32_to_cpu(file->permissions.dev));
			inode->i_op = &hfsplus_file_inode_operations;
			inode->i_fop = &hfsplus_file_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else if (S_ISLNK(inode->i_mode)) {
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else {
			init_special_inode(inode, inode->i_mode,
					   be32_to_cpu(file->permissions.dev));
		}
		inode->i_atime = timespec_to_timespec64(hfsp_mt2ut(file->access_date));
		inode->i_mtime = timespec_to_timespec64(hfsp_mt2ut(file->content_mod_date));
		inode->i_ctime = timespec_to_timespec64(hfsp_mt2ut(file->attribute_mod_date));
		HFSPLUS_I(inode)->create_date = file->create_date;
	} else {
		pr_err("bad catalog entry used to create inode\n");
		res = -EIO;
	}
	return res;
}

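/*
 * Write the in-core inode back into its catalog record.  For a resource
 * fork inode the lookup is done against the owning data inode, and only
 * the resource fork of that record is updated.
 */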
int hfsplus_cat_write_inode(struct inode *inode)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfsplus_cat_entry entry;

	if (HFSPLUS_IS_RSRC(inode))
		main_inode = HFSPLUS_I(inode)->rsrc_inode;

	if (!main_inode->i_nlink)
		return 0;

	if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb)->cat_tree, &fd))
		/* panic? */
		return -EIO;

	if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_folder));
		/* simple node checks? */
		hfsplus_cat_set_perms(inode, &folder->permissions);
		folder->access_date = hfsp_ut2mt(inode->i_atime);
		folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		folder->valence = cpu_to_be32(inode->i_size - 2);
		if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
			folder->subfolders =
				cpu_to_be32(HFSPLUS_I(inode)->subfolders);
		}
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_folder));
	} else if (HFSPLUS_IS_RSRC(inode)) {
		struct hfsplus_cat_file *file = &entry.file;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->rsrc_fork);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_file));
	} else {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd.entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->data_fork);
		hfsplus_cat_set_perms(inode, &file->permissions);
		if (HFSPLUS_FLG_IMMUTABLE &
				(file->permissions.rootflags |
					file->permissions.userflags))
			file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
		else
			file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
		file->access_date = hfsp_ut2mt(inode->i_atime);
		file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_file));
	}

	set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags);
out:
	hfs_find_exit(&fd);
	return 0;
}