/*
 * linux/fs/hfsplus/inode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/version.h>
#include <linux/mpage.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

static int hfsplus_readpage(struct file *file, struct page *page)
{
	//printk("readpage: %lu\n", page->index);
	return block_read_full_page(page, hfsplus_get_block);
}

static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
{
	//printk("writepage: %lu\n", page->index);
	return block_write_full_page(page, hfsplus_get_block, wbc);
}

static int hfsplus_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
{
	return cont_prepare_write(page, from, to, hfsplus_get_block,
		&HFSPLUS_I(page->mapping->host).phys_size);
}

static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hfsplus_get_block);
}

static int hfsplus_releasepage(struct page *page, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		tree = HFSPLUS_SB(sb).ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		tree = HFSPLUS_SB(sb).cat_tree;
		break;
	case HFSPLUS_ATTR_CNID:
		tree = HFSPLUS_SB(sb).attr_tree;
		break;
	default:
		BUG();
		return 0;
	}
	if (tree->node_size >= PAGE_CACHE_SIZE) {
		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (!node)
			;
		else if (atomic_read(&node->refcnt))
			res = 0;
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	//printk("releasepage: %lu,%x = %d\n", page->index, mask, res);
	return res ? try_to_free_buffers(page) : 0;
}
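
/*
 * Adapter for blockdev_direct_IO(): hfsplus_get_block() maps one block at
 * a time, so report a one-block mapping in bh_result regardless of
 * max_blocks.
 */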
static int hfsplus_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks,
			      struct buffer_head *bh_result, int create)
{
	int ret;

	ret = hfsplus_get_block(inode, iblock, bh_result, create);
	if (!ret)
		bh_result->b_size = (1 << inode->i_blkbits);
	return ret;
}

static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_dentry->d_inode->i_mapping->host;

	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
				  offset, nr_segs, hfsplus_get_blocks, NULL);
}

static int hfsplus_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hfsplus_get_block);
}

struct address_space_operations hfsplus_btree_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= hfsplus_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= hfsplus_bmap,
	.releasepage	= hfsplus_releasepage,
};

struct address_space_operations hfsplus_aops = {
	.readpage	= hfsplus_readpage,
	.writepage	= hfsplus_writepage,
	.sync_page	= block_sync_page,
	.prepare_write	= hfsplus_prepare_write,
	.commit_write	= generic_commit_write,
	.bmap		= hfsplus_bmap,
	.direct_IO	= hfsplus_direct_IO,
	.writepages	= hfsplus_writepages,
};
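
/*
 * Lookup of the magic "rsrc" name inside a regular file: the resource
 * fork is exposed through a second in-core inode that shares the file's
 * catalog node ID, is flagged HFSPLUS_FLG_RSRC, and is cross-linked with
 * the data-fork inode via rsrc_inode.
 */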
static struct dentry *hfsplus_file_lookup(struct inode *dir, struct dentry *dentry,
					  struct nameidata *nd)
{
	struct hfs_find_data fd;
	struct super_block *sb = dir->i_sb;
	struct inode *inode = NULL;
	int err;

	if (HFSPLUS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
		goto out;

	inode = HFSPLUS_I(dir).rsrc_inode;
	if (inode)
		goto out;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	inode->i_ino = dir->i_ino;
	INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
	init_MUTEX(&HFSPLUS_I(inode).extents_lock);
	HFSPLUS_I(inode).flags = HFSPLUS_FLG_RSRC;

	hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
	err = hfsplus_find_cat(sb, dir->i_ino, &fd);
	if (!err)
		err = hfsplus_cat_read_inode(inode, &fd);
	hfs_find_exit(&fd);
	if (err) {
		iput(inode);
		return ERR_PTR(err);
	}
	HFSPLUS_I(inode).rsrc_inode = dir;
	HFSPLUS_I(dir).rsrc_inode = inode;
	igrab(dir);
	hlist_add_head(&inode->i_hash, &HFSPLUS_SB(sb).rsrc_inodes);
	mark_inode_dirty(inode);
	{
	void hfsplus_inode_check(struct super_block *sb);
	atomic_inc(&HFSPLUS_SB(sb).inode_cnt);
	hfsplus_inode_check(sb);
	}
out:
	d_add(dentry, inode);
	return NULL;
}

static void hfsplus_get_perms(struct inode *inode, struct hfsplus_perm *perms, int dir)
{
	struct super_block *sb = inode->i_sb;
	u16 mode;

	mode = be16_to_cpu(perms->mode);

	inode->i_uid = be32_to_cpu(perms->owner);
	if (!inode->i_uid && !mode)
		inode->i_uid = HFSPLUS_SB(sb).uid;

	inode->i_gid = be32_to_cpu(perms->group);
	if (!inode->i_gid && !mode)
		inode->i_gid = HFSPLUS_SB(sb).gid;

	if (dir) {
		mode = mode ? (mode & S_IALLUGO) :
			(S_IRWXUGO & ~(HFSPLUS_SB(sb).umask));
		mode |= S_IFDIR;
	} else if (!mode)
		mode = S_IFREG | ((S_IRUGO|S_IWUGO) &
			~(HFSPLUS_SB(sb).umask));
	inode->i_mode = mode;

	HFSPLUS_I(inode).rootflags = perms->rootflags;
	HFSPLUS_I(inode).userflags = perms->userflags;
	if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (perms->rootflags & HFSPLUS_FLG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
}

static void hfsplus_set_perms(struct inode *inode, struct hfsplus_perm *perms)
{
	if (inode->i_flags & S_IMMUTABLE)
		perms->rootflags |= HFSPLUS_FLG_IMMUTABLE;
	else
		perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE;
	if (inode->i_flags & S_APPEND)
		perms->rootflags |= HFSPLUS_FLG_APPEND;
	else
		perms->rootflags &= ~HFSPLUS_FLG_APPEND;
	perms->userflags = HFSPLUS_I(inode).userflags;
	perms->mode = cpu_to_be16(inode->i_mode);
	perms->owner = cpu_to_be32(inode->i_uid);
	perms->group = cpu_to_be32(inode->i_gid);
	perms->dev = cpu_to_be32(HFSPLUS_I(inode).dev);
}

static int hfsplus_permission(struct inode *inode, int mask, struct nameidata *nd)
{
	/* MAY_EXEC is also used for lookup, so allow lookup when no x bit
	 * is set: open_exec() makes the same test, so the file still
	 * cannot be executed.  If an x bit is set, fall back to the
	 * standard permission check.
	 */
	if (S_ISREG(inode->i_mode) && mask & MAY_EXEC && !(inode->i_mode & 0111))
		return 0;
	return generic_permission(inode, mask, NULL);
}
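
/*
 * Open/release bookkeeping is done on the data-fork inode (resource-fork
 * inodes are redirected to it).  On the last release any blocks allocated
 * beyond i_size are trimmed, and inodes marked S_DEAD (unlinked while
 * still open and parked in the hidden directory by the unlink path) are
 * removed from the catalog for good.
 */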
static int hfsplus_file_open(struct inode *inode, struct file *file)
{
	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode).rsrc_inode;
	if (atomic_read(&file->f_count) != 1)
		return 0;
	atomic_inc(&HFSPLUS_I(inode).opencnt);
	return 0;
}

static int hfsplus_file_release(struct inode *inode, struct file *file)
{
	struct super_block *sb = inode->i_sb;

	if (HFSPLUS_IS_RSRC(inode))
		inode = HFSPLUS_I(inode).rsrc_inode;
	if (atomic_read(&file->f_count) != 0)
		return 0;
	if (atomic_dec_and_test(&HFSPLUS_I(inode).opencnt)) {
		down(&inode->i_sem);
		hfsplus_file_truncate(inode);
		if (inode->i_flags & S_DEAD) {
			hfsplus_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
			hfsplus_delete_inode(inode);
		}
		up(&inode->i_sem);
	}
	return 0;
}

extern struct inode_operations hfsplus_dir_inode_operations;
extern struct file_operations hfsplus_dir_operations;

static struct inode_operations hfsplus_file_inode_operations = {
	.lookup		= hfsplus_file_lookup,
	.truncate	= hfsplus_file_truncate,
	.permission	= hfsplus_permission,
	.setxattr	= hfsplus_setxattr,
	.getxattr	= hfsplus_getxattr,
	.listxattr	= hfsplus_listxattr,
};

static struct file_operations hfsplus_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_file_read,
	.write		= generic_file_write,
	.mmap		= generic_file_mmap,
	.sendfile	= generic_file_sendfile,
	.fsync		= file_fsync,
	.open		= hfsplus_file_open,
	.release	= hfsplus_file_release,
	.ioctl		= hfsplus_ioctl,
};

struct inode *hfsplus_new_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	if (!inode)
		return NULL;

	{
	void hfsplus_inode_check(struct super_block *sb);
	atomic_inc(&HFSPLUS_SB(sb).inode_cnt);
	hfsplus_inode_check(sb);
	}
	inode->i_ino = HFSPLUS_SB(sb).next_cnid++;
	inode->i_mode = mode;
	inode->i_uid = current->fsuid;
	inode->i_gid = current->fsgid;
	inode->i_nlink = 1;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	inode->i_blksize = HFSPLUS_SB(sb).alloc_blksz;
	INIT_LIST_HEAD(&HFSPLUS_I(inode).open_dir_list);
	init_MUTEX(&HFSPLUS_I(inode).extents_lock);
	atomic_set(&HFSPLUS_I(inode).opencnt, 0);
	HFSPLUS_I(inode).flags = 0;
	memset(HFSPLUS_I(inode).first_extents, 0, sizeof(hfsplus_extent_rec));
	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode).alloc_blocks = 0;
	HFSPLUS_I(inode).first_blocks = 0;
	HFSPLUS_I(inode).cached_start = 0;
	HFSPLUS_I(inode).cached_blocks = 0;
	HFSPLUS_I(inode).phys_size = 0;
	HFSPLUS_I(inode).fs_blocks = 0;
	HFSPLUS_I(inode).rsrc_inode = NULL;
	if (S_ISDIR(inode->i_mode)) {
		inode->i_size = 2;
		HFSPLUS_SB(sb).folder_count++;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (S_ISREG(inode->i_mode)) {
		HFSPLUS_SB(sb).file_count++;
		inode->i_op = &hfsplus_file_inode_operations;
		inode->i_fop = &hfsplus_file_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		HFSPLUS_I(inode).clump_blocks = HFSPLUS_SB(sb).data_clump_blocks;
	} else if (S_ISLNK(inode->i_mode)) {
		HFSPLUS_SB(sb).file_count++;
		inode->i_op = &page_symlink_inode_operations;
		inode->i_mapping->a_ops = &hfsplus_aops;
		HFSPLUS_I(inode).clump_blocks = 1;
	} else
		HFSPLUS_SB(sb).file_count++;
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	sb->s_dirt = 1;

	return inode;
}

void hfsplus_delete_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (S_ISDIR(inode->i_mode)) {
		HFSPLUS_SB(sb).folder_count--;
		sb->s_dirt = 1;
		return;
	}
	HFSPLUS_SB(sb).file_count--;
	if (S_ISREG(inode->i_mode)) {
		if (!inode->i_nlink) {
			inode->i_size = 0;
			hfsplus_file_truncate(inode);
		}
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_size = 0;
		hfsplus_file_truncate(inode);
	}
	sb->s_dirt = 1;
}
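
/*
 * hfsplus_inode_read_fork()/hfsplus_inode_write_fork() copy the first
 * eight extent descriptors and the size fields between the raw fork
 * record in the catalog entry and the in-core inode; on read, a zero
 * clump size falls back to the per-volume default.
 */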
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	struct super_block *sb = inode->i_sb;
	u32 count;
	int i;

	memcpy(&HFSPLUS_I(inode).first_extents, &fork->extents,
	       sizeof(hfsplus_extent_rec));
	for (count = 0, i = 0; i < 8; i++)
		count += be32_to_cpu(fork->extents[i].block_count);
	HFSPLUS_I(inode).first_blocks = count;
	memset(HFSPLUS_I(inode).cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode).cached_start = 0;
	HFSPLUS_I(inode).cached_blocks = 0;

	HFSPLUS_I(inode).alloc_blocks = be32_to_cpu(fork->total_blocks);
	inode->i_size = HFSPLUS_I(inode).phys_size = be64_to_cpu(fork->total_size);
	HFSPLUS_I(inode).fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFSPLUS_I(inode).fs_blocks << sb->s_blocksize_bits);
	HFSPLUS_I(inode).clump_blocks = be32_to_cpu(fork->clump_size) >> HFSPLUS_SB(sb).alloc_blksz_shift;
	if (!HFSPLUS_I(inode).clump_blocks)
		HFSPLUS_I(inode).clump_blocks = HFSPLUS_IS_RSRC(inode) ?
				HFSPLUS_SB(sb).rsrc_clump_blocks :
				HFSPLUS_SB(sb).data_clump_blocks;
}

void hfsplus_inode_write_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
	memcpy(&fork->extents, &HFSPLUS_I(inode).first_extents,
	       sizeof(hfsplus_extent_rec));
	fork->total_size = cpu_to_be64(inode->i_size);
	fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode).alloc_blocks);
}
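
/*
 * hfsplus_cat_read_inode() and hfsplus_cat_write_inode() translate between
 * a catalog file/folder record and the in-core inode: permissions,
 * timestamps and fork data.  For regular files the on-disk
 * permissions.dev field carries the link count; for special files it
 * carries the device number.
 */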
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
	hfsplus_cat_entry entry;
	int res = 0;
	u16 type;

	type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

	HFSPLUS_I(inode).dev = 0;
	inode->i_blksize = HFSPLUS_SB(inode->i_sb).alloc_blksz;
	if (type == HFSPLUS_FOLDER) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_folder));
		hfsplus_get_perms(inode, &folder->permissions, 1);
		inode->i_nlink = 1;
		inode->i_size = 2 + be32_to_cpu(folder->valence);
		inode->i_atime = hfsp_mt2ut(folder->access_date);
		inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
		inode->i_ctime = inode->i_mtime;
		HFSPLUS_I(inode).fs_blocks = 0;
		inode->i_op = &hfsplus_dir_inode_operations;
		inode->i_fop = &hfsplus_dir_operations;
	} else if (type == HFSPLUS_FILE) {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd->entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
					sizeof(struct hfsplus_cat_file));

		hfsplus_inode_read_fork(inode, HFSPLUS_IS_DATA(inode) ?
					&file->data_fork : &file->rsrc_fork);
		hfsplus_get_perms(inode, &file->permissions, 0);
		inode->i_nlink = 1;
		if (S_ISREG(inode->i_mode)) {
			if (file->permissions.dev)
				inode->i_nlink = be32_to_cpu(file->permissions.dev);
			inode->i_op = &hfsplus_file_inode_operations;
			inode->i_fop = &hfsplus_file_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else if (S_ISLNK(inode->i_mode)) {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &hfsplus_aops;
		} else {
			init_special_inode(inode, inode->i_mode,
					   be32_to_cpu(file->permissions.dev));
		}
		inode->i_atime = hfsp_mt2ut(file->access_date);
		inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
		inode->i_ctime = inode->i_mtime;
	} else {
		printk("HFS+-fs: bad catalog entry used to create inode\n");
		res = -EIO;
	}
	return res;
}

int hfsplus_cat_write_inode(struct inode *inode)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfsplus_cat_entry entry;

	if (HFSPLUS_IS_RSRC(inode))
		main_inode = HFSPLUS_I(inode).rsrc_inode;

	if (!main_inode->i_nlink)
		return 0;

	if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb).cat_tree, &fd))
		/* panic? */
		return -EIO;

	if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		struct hfsplus_cat_folder *folder = &entry.folder;

		if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_folder));
		/* simple node checks? */
		hfsplus_set_perms(inode, &folder->permissions);
		folder->access_date = hfsp_ut2mt(inode->i_atime);
		folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		folder->valence = cpu_to_be32(inode->i_size - 2);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_folder));
	} else if (HFSPLUS_IS_RSRC(inode)) {
		struct hfsplus_cat_file *file = &entry.file;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
			       sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->rsrc_fork);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_file));
	} else {
		struct hfsplus_cat_file *file = &entry.file;

		if (fd.entrylength < sizeof(struct hfsplus_cat_file))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
					sizeof(struct hfsplus_cat_file));
		hfsplus_inode_write_fork(inode, &file->data_fork);
		if (S_ISREG(inode->i_mode))
			HFSPLUS_I(inode).dev = inode->i_nlink;
		if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
			HFSPLUS_I(inode).dev = kdev_t_to_nr(inode->i_rdev);
		hfsplus_set_perms(inode, &file->permissions);
		if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE)
			file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
		else
			file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
		file->access_date = hfsp_ut2mt(inode->i_atime);
		file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
		file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
		hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
				sizeof(struct hfsplus_cat_file));
	}
out:
	hfs_find_exit(&fd);
	return 0;
}