/*
 *  linux/fs/hfsplus/inode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

static int hfsplus_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, hfsplus_get_block);
}

static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, hfsplus_get_block, wbc);
}

static int hfsplus_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return cont_prepare_write(page, from, to, hfsplus_get_block,
		&HFSPLUS_I(page->mapping->host).phys_size);
}

static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hfsplus_get_block);
}

static int hfsplus_releasepage(struct page *page, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		tree = HFSPLUS_SB(sb).ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		tree = HFSPLUS_SB(sb).cat_tree;
		break;
	case HFSPLUS_ATTR_CNID:
		tree = HFSPLUS_SB(sb).attr_tree;
		break;
	default:
		BUG();
		return 0;
	}
	if (tree->node_size >= PAGE_CACHE_SIZE) {
		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (!node)
			;
		else if (atomic_read(&node->refcnt))
			res = 0;
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ?
		try_to_free_buffers(page) : 0;
}

static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;

	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, hfsplus_get_block, NULL);
}

static int hfsplus_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hfsplus_get_block);
}

const struct address_space_operations hfsplus_btree_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= hfsplus_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= hfsplus_bmap,
	.releasepage	= hfsplus_releasepage,
};

const struct address_space_operations hfsplus_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= hfsplus_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= hfsplus_bmap,
	.direct_IO	= hfsplus_direct_IO,
	.writepages	= hfsplus_writepages,
};

struct dentry_operations hfsplus_dentry_operations = {
	.d_hash		= hfsplus_hash_dentry,
	.d_compare	= hfsplus_compare_dentry,
};

static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dentry,
					  struct nameidata *nd)
{
	struct hfs_find_data fd;
	struct super_block *sb = dir->i_sb;
	struct inode *inode = NULL;
	int err;

	if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
		goto out;

	inode = HFSPLUS_I(dir).rsrc_inode;
	if (inode)
		goto out;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	inode->i_ino = dir->i_ino;
	INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
	init_MUTEX(&HFSPLUS_I(inode).extents_lock);
	HFSPLUS_I(inode).flags = HFSPLUS_FLG_RSRC;

	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
	err = hfsplus_find_cat(sb, dir->i_ino, &fd);
	if (!err)
		err = hfsplus_cat_read_inode(inode, &fd);
	hfs_find_exit(&fd);
	if (err) {
		iput(inode);
		return ERR_PTR(err);
	}
	HFSPLUS_I(inode).rsrc_inode = dir;
	HFSPLUS_I(dir).rsrc_inode = inode;
	igrab(dir);
	hlist_add_head(&inode->i_hash, &HFSPLUS_SB(sb).rsrc_inodes);
	mark_inode_dirty(inode);
out:
	d_add(dentry, inode);
	return NULL;
}

static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir)
{
	struct super_block *sb = inode->i_sb;
	u16 mode;

	mode = be16_to_cpu(perms->mode);

	inode->i_uid = be32_to_cpu(perms->owner);
	if (!inode->i_uid && !mode)
		inode->i_uid = HFSPLUS_SB(sb).uid;

	inode->i_gid = be32_to_cpu(perms->group);
	if (!inode->i_gid && !mode)
		inode->i_gid = HFSPLUS_SB(sb).gid;

	if (dir) {
		mode = mode ?
			(mode & S_IALLUGO) :
			(S_IRWXUGO & ~(HFSPLUS_SB(sb).umask));
		mode |= S_IFDIR;
	} else if (!mode)
		mode = S_IFREG | ((S_IRUGO|S_IWUGO) &
			~(HFSPLUS_SB(sb).umask));
	inode->i_mode = mode;

	HFSPLUS_I(inode).rootflags = perms->rootflags;
	HFSPLUS_I(inode).userflags = perms->userflags;
	if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (perms->rootflags & HFSPLUS_FLG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
}

static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
{
	if (inode->i_flags & S_IMMUTABLE)
		perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
	else
		perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
	if (inode->i_flags & S_APPEND)
		perms->rootflags |= HFSPLUS_FLG_APPEND;
	else
		perms->rootflags &= ~HFSPLUS_FLG_APPEND;
	perms->userflags = HFSPLUS_I(inode).userflags;
	perms->mode = cpu_to_be16(inode->i_mode);
	perms->owner = cpu_to_be32(inode->i_uid);
	perms->group = cpu_to_be32(inode->i_gid);
	perms->dev = cpu_to_be32(HFSPLUS_I(inode).dev);
}

static int hfsplus_permission(struct inode *inode, int mask, struct nameidata *nd)
{
	/* MAY_EXEC is also used for lookup; if no x bit is set, allow the
	 * lookup.  open_exec() makes the same check, so the file is still not
	 * executable.  If an x bit is set, fall back to the standard
	 * permission check.
	 */
	if (S_ISREG(inode->i_mode) && mask & MAY_EXEC && !(inode->i_mode & 0111))
		return 0;
	return generic_permission(inode, mask, NULL);
}

static int hfsplus_file_open(struct inode *inode, struct file *file)
{
	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode).rsrc_inode;
	if (atomic_read(&file->f_count) != 1)
		return 0;
	atomic_inc(&HFSPLUS_I(inode).opencnt);
	return 0;
}

static int hfsplus_file_release(struct inode *inode, struct file *file)
{
	struct super_block *sb = inode->i_sb;

	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode).rsrc_inode;
	if (atomic_read(&file->f_count) != 0)
		return 0;
	if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) {
		mutex_lock(&inode->i_mutex);
		hfsplus_file_truncate(inode);
		if (inode->i_flags & S_DEAD) {
			hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
			hfsplus_delete_inode(inode);
		}
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}

extern const struct inode_operations hfsplus_dir_inode_operations;
extern struct file_operations hfsplus_dir_operations;

static const struct inode_operations hfsplus_file_inode_operations = {
	.lookup		= hfsplus_file_lookup,
	.truncate	= hfsplus_file_truncate,
	.permission	= hfsplus_permission,
	.setxattr	= hfsplus_setxattr,
	.getxattr	= hfsplus_getxattr,
	.listxattr	= hfsplus_listxattr,
};

static const struct file_operations hfsplus_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= file_fsync,
	.open		= hfsplus_file_open,
	.release	= hfsplus_file_release,
	.ioctl		= hfsplus_ioctl,
};

struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;

	inode->i_ino = HFSPLUS_SB(sb).next_cnid++;
	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_nlink = 1;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
	init_MUTEX(&HFSPLUS_I(inode).extents_lock);
	atomic_set(&HFSPLUS_I(inode).opencnt, 0);
	HFSPLUS_I(inode).flags = 0;
	memset(HFSPLUS_I(inode).first_extents, 0, sizeof(hfsplus_extent_rec));
	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode).alloc_blocks = 0;
	HFSPLUS_I(inode).first_blocks = 0;
	HFSPLUS_I(inode).cached_start = 0;
	HFSPLUS_I(inode).cached_blocks = 0;
	HFSPLUS_I(inode).phys_size = 0;
	HFSPLUS_I(inode).fs_blocks = 0;
	HFSPLUS_I(inode).rsrc_inode = NULL;
	if (S_ISDIR(inode->i_mode)) {
		inode->i_size = 2;
		HFSPLUS_SB(sb).folder_count++;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (S_ISREG(inode->i_mode)) {
		HFSPLUS_SB(sb).file_count++;
		inode->i_op = &hfsplus_file_inode_operations;
		inode->i_fop = &hfsplus_file_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		HFSPLUS_I(inode).clump_blocks = HFSPLUS_SB(sb).data_clump_blocks;
	} else if (S_ISLNK(inode->i_mode)) {
		HFSPLUS_SB(sb).file_count++;
		inode->i_op = &page_symlink_inode_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		HFSPLUS_I(inode).clump_blocks = 1;
	} else
		HFSPLUS_SB(sb).file_count++;
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	sb->s_dirt = 1;

	return inode;
}

void hfsplus_delete_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (S_ISDIR(inode->i_mode)) {
		HFSPLUS_SB(sb).folder_count--;
		sb->s_dirt = 1;
		return;
	}
	HFSPLUS_SB(sb).file_count--;
	if (S_ISREG(inode->i_mode)) {
		if (!inode->i_nlink) {
			inode->i_size = 0;
			hfsplus_file_truncate(inode);
		}
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_size = 0;
		hfsplus_file_truncate(inode);
	}
	sb->s_dirt = 1;
}

void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	struct super_block *sb = inode->i_sb;
	u32 count;
	int i;

	memcpy(&HFSPLUS_I(inode).first_extents, &fork->extents,
	       sizeof(hfsplus_extent_rec));
	for (count = 0, i = 0; i < 8; i++)
		count += be32_to_cpu(fork->extents[i].block_count);
	HFSPLUS_I(inode).first_blocks = count;
	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode).cached_start = 0;
	HFSPLUS_I(inode).cached_blocks = 0;

	HFSPLUS_I(inode).alloc_blocks = be32_to_cpu(fork->total_blocks);
	inode->i_size = HFSPLUS_I(inode).phys_size = be64_to_cpu(fork->total_size);
	HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
	HFSPLUS_I(inode).clump_blocks = be32_to_cpu(fork->clump_size) >> HFSPLUS_SB(sb).alloc_blksz_shift;
	if (!HFSPLUS_I(inode).clump_blocks)
		HFSPLUS_I(inode).clump_blocks = HFSPLUS_IS_RSRC(inode) ?
			HFSPLUS_SB(sb).rsrc_clump_blocks :
			HFSPLUS_SB(sb).data_clump_blocks;
}

void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	memcpy(&fork->extents, &HFSPLUS_I(inode).first_extents,
	       sizeof(hfsplus_extent_rec));
	fork->total_size = cpu_to_be64(inode->i_size);
	fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode).alloc_blocks);
}

int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
	hfsplus_cat_entry entry;
	int res = 0;
	u16 type;

	type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

	HFSPLUS_I(inode).dev = 0;
	if (type == HFSPLUS_FOLDER) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
			       sizeof(struct hfsplus_cat_folder));
		hfsplus_get_perms(inode, &folder->permissions, 1);
		inode->i_nlink = 1;
		inode->i_size = 2 + be32_to_cpu(folder->valence);
		inode->i_atime = hfsp_mt2ut(folder->access_date);
		inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
		HFSPLUS_I(inode).create_date = folder->create_date;
		HFSPLUS_I(inode).fs_blocks = 0;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (type == HFSPLUS_FILE) {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd->entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
			       sizeof(struct hfsplus_cat_file));

		hfsplus_inode_read_fork(inode, HFSPLUS_IS_DATA(inode) ?
					&file->data_fork : &file->rsrc_fork);
		hfsplus_get_perms(inode, &file->permissions, 0);
		inode->i_nlink = 1;
		if (S_ISREG(inode->i_mode)) {
			if (file->permissions.dev)
				inode->i_nlink = be32_to_cpu(file->permissions.dev);
			inode->i_op = &hfsplus_file_inode_operations;
			inode->i_fop = &hfsplus_file_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else if (S_ISLNK(inode->i_mode)) {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else {
			init_special_inode(inode, inode->i_mode,
					   be32_to_cpu(file->permissions.dev));
		}
		inode->i_atime = hfsp_mt2ut(file->access_date);
		inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
		inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
		HFSPLUS_I(inode).create_date = file->create_date;
	} else {
		printk(KERN_ERR "hfs: bad catalog entry used to create inode\n");
		res = -EIO;
	}
	return res;
}

int hfsplus_cat_write_inode(struct inode *inode)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfsplus_cat_entry entry;

	if (HFSPLUS_IS_RSRC(inode))
		main_inode = HFSPLUS_I(inode).rsrc_inode;

	if (!main_inode->i_nlink)
		return 0;

	if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb).cat_tree, &fd))
		/* panic? */
		return -EIO;

	if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_folder));
		/* simple node checks?
		 */
		hfsplus_set_perms(inode, &folder->permissions);
		folder->access_date = hfsp_ut2mt(inode->i_atime);
		folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		folder->valence = cpu_to_be32(inode->i_size - 2);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_folder));
	} else if (HFSPLUS_IS_RSRC(inode)) {
		struct hfsplus_cat_file *file = &entry.file;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->rsrc_fork);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_file));
	} else {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd.entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->data_fork);
		if (S_ISREG(inode->i_mode))
			HFSPLUS_I(inode).dev = inode->i_nlink;
		if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
			HFSPLUS_I(inode).dev = kdev_t_to_nr(inode->i_rdev);
		hfsplus_set_perms(inode, &file->permissions);
		if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
			file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
		else
			file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
		file->access_date = hfsp_ut2mt(inode->i_atime);
		file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_file));
	}
out:
	hfs_find_exit(&fd);
	return 0;
}