/*
 * Compressed rom filesystem for Linux.
 *
 * Copyright (C) 1999 Linus Torvalds.
 *
 * This file is released under the GPL.
 */

/*
 * These are the VFS interfaces to the compressed rom filesystem.
 * The actual compression is based on zlib, see the other files.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/blkdev.h>
#include <linux/cramfs_fs.h>
#include <linux/slab.h>
#include <linux/cramfs_fs_sb.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/mutex.h>
#include <asm/semaphore.h>

#include <asm/uaccess.h>

static const struct super_operations cramfs_ops;
static const struct inode_operations cramfs_dir_inode_operations;
static const struct file_operations cramfs_directory_operations;
static const struct address_space_operations cramfs_aops;

static DEFINE_MUTEX(read_mutex);


/* These two macros may change in future, to provide better st_ino
   semantics. */
#define CRAMINO(x)	(((x)->offset && (x)->size) ? (x)->offset << 2 : 1)
#define OFFSET(x)	((x)->i_ino)


static int cramfs_iget5_test(struct inode *inode, void *opaque)
{
	struct cramfs_inode *cramfs_inode = opaque;

	if (inode->i_ino != CRAMINO(cramfs_inode))
		return 0; /* does not match */

	if (inode->i_ino != 1)
		return 1;

	/* all empty directories, char, block, pipe, and sock, share inode #1 */

	if ((inode->i_mode != cramfs_inode->mode) ||
	    (inode->i_gid != cramfs_inode->gid) ||
	    (inode->i_uid != cramfs_inode->uid))
		return 0; /* does not match */

	if ((S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) &&
	    (inode->i_rdev != old_decode_dev(cramfs_inode->size)))
		return 0; /* does not match */

	return 1; /* matches */
}

static int cramfs_iget5_set(struct inode *inode, void *opaque)
{
	static struct timespec zerotime;
	struct cramfs_inode *cramfs_inode = opaque;

	inode->i_mode = cramfs_inode->mode;
	inode->i_uid = cramfs_inode->uid;
	inode->i_size = cramfs_inode->size;
	inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
	inode->i_gid = cramfs_inode->gid;
	/* Struct copy intentional */
	inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
	inode->i_ino = CRAMINO(cramfs_inode);
	/* inode->i_nlink is left 1 - arguably wrong for directories,
	   but it's the best we can do without reading the directory
	   contents.  1 yields the right result in GNU find, even
	   without the -noleaf option. */
	if (S_ISREG(inode->i_mode)) {
		inode->i_fop = &generic_ro_fops;
		inode->i_data.a_ops = &cramfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &cramfs_dir_inode_operations;
		inode->i_fop = &cramfs_directory_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &page_symlink_inode_operations;
		inode->i_data.a_ops = &cramfs_aops;
	} else {
		inode->i_size = 0;
		inode->i_blocks = 0;
		init_special_inode(inode, inode->i_mode,
			old_decode_dev(cramfs_inode->size));
	}
	return 0;
}

static struct inode *get_cramfs_inode(struct super_block *sb,
				struct cramfs_inode *cramfs_inode)
{
	struct inode *inode = iget5_locked(sb, CRAMINO(cramfs_inode),
					   cramfs_iget5_test, cramfs_iget5_set,
					   cramfs_inode);
	if (inode && (inode->i_state & I_NEW)) {
		unlock_new_inode(inode);
	}
	return inode;
}

/*
 * We have our own block cache: don't fill up the buffer cache
 * with the rom-image, because the way the filesystem is set
 * up the accesses should be fairly regular and cached in the
 * page cache and dentry tree anyway..
 *
 * This also acts as a way to guarantee contiguous areas of up to
 * BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to
 * worry about end-of-buffer issues even when decompressing a full
 * page cache.
 */
#define READ_BUFFERS (2)
/* NEXT_BUFFER(): Loop over [0..(READ_BUFFERS-1)]. */
#define NEXT_BUFFER(_ix) ((_ix) ^ 1)

/*
 * BLKS_PER_BUF_SHIFT should be at least 2 to allow for "compressed"
 * data that takes up more space than the original and with unlucky
 * alignment.
 */
#define BLKS_PER_BUF_SHIFT	(2)
#define BLKS_PER_BUF		(1 << BLKS_PER_BUF_SHIFT)
#define BUFFER_SIZE		(BLKS_PER_BUF * PAGE_CACHE_SIZE)

static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
static unsigned buffer_blocknr[READ_BUFFERS];
static struct super_block *buffer_dev[READ_BUFFERS];
static int next_buffer;

/*
 * Returns a pointer to a buffer containing at least LEN bytes of
 * filesystem starting at byte offset OFFSET into the filesystem.
 */
static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned int len)
{
	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *pages[BLKS_PER_BUF];
	unsigned i, blocknr, buffer;
	unsigned long devsize;
	char *data;

	if (!len)
		return NULL;
	blocknr = offset >> PAGE_CACHE_SHIFT;
	offset &= PAGE_CACHE_SIZE - 1;

	/* Check if an existing buffer already has the data.. */
	for (i = 0; i < READ_BUFFERS; i++) {
		unsigned int blk_offset;

		if (buffer_dev[i] != sb)
			continue;
		if (blocknr < buffer_blocknr[i])
			continue;
		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT;
		blk_offset += offset;
		if (blk_offset + len > BUFFER_SIZE)
			continue;
		return read_buffers[i] + blk_offset;
	}

	devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT;

	/* Ok, read in BLKS_PER_BUF pages completely first. */
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = NULL;

		if (blocknr + i < devsize) {
			page = read_mapping_page_async(mapping, blocknr + i,
							NULL);
			/* synchronous error? */
			if (IS_ERR(page))
				page = NULL;
		}
		pages[i] = page;
	}

	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];
		if (page) {
			wait_on_page_locked(page);
			if (!PageUptodate(page)) {
				/* asynchronous error */
				page_cache_release(page);
				pages[i] = NULL;
			}
		}
	}

	buffer = next_buffer;
	next_buffer = NEXT_BUFFER(buffer);
	buffer_blocknr[buffer] = blocknr;
	buffer_dev[buffer] = sb;

	data = read_buffers[buffer];
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];
		if (page) {
			memcpy(data, kmap(page), PAGE_CACHE_SIZE);
			kunmap(page);
			page_cache_release(page);
		} else
			memset(data, 0, PAGE_CACHE_SIZE);
		data += PAGE_CACHE_SIZE;
	}
	return read_buffers[buffer] + offset;
}

static void cramfs_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}

static int cramfs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_RDONLY;
	return 0;
}

static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int i;
	struct cramfs_super super;
	unsigned long root_offset;
	struct cramfs_sb_info *sbi;
	struct inode *root;

	sb->s_flags |= MS_RDONLY;

	sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;

	/* Invalidate the read buffers on mount: think disk change.. */
	mutex_lock(&read_mutex);
	for (i = 0; i < READ_BUFFERS; i++)
		buffer_blocknr[i] = -1;

	/* Read the first block and get the superblock from it */
	memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super));
	mutex_unlock(&read_mutex);

	/* Do sanity checks on the superblock */
	if (super.magic != CRAMFS_MAGIC) {
		/* check for wrong endianness */
		if (super.magic == CRAMFS_MAGIC_WEND) {
			if (!silent)
				printk(KERN_ERR "cramfs: wrong endianness\n");
			goto out;
		}

		/* check at 512 byte offset */
		mutex_lock(&read_mutex);
		memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super));
		mutex_unlock(&read_mutex);
		if (super.magic != CRAMFS_MAGIC) {
			if (super.magic == CRAMFS_MAGIC_WEND && !silent)
				printk(KERN_ERR "cramfs: wrong endianness\n");
			else if (!silent)
				printk(KERN_ERR "cramfs: wrong magic\n");
			goto out;
		}
	}

	/* get feature flags first */
	if (super.flags & ~CRAMFS_SUPPORTED_FLAGS) {
		printk(KERN_ERR "cramfs: unsupported filesystem features\n");
		goto out;
	}

	/* Check that the root inode is in a sane state */
	if (!S_ISDIR(super.root.mode)) {
		printk(KERN_ERR "cramfs: root is not a directory\n");
		goto out;
	}
	root_offset = super.root.offset << 2;
	if (super.flags & CRAMFS_FLAG_FSID_VERSION_2) {
		sbi->size = super.size;
		sbi->blocks = super.fsid.blocks;
		sbi->files = super.fsid.files;
	} else {
		sbi->size = 1 << 28;
		sbi->blocks = 0;
		sbi->files = 0;
	}
	sbi->magic = super.magic;
	sbi->flags = super.flags;
	if (root_offset == 0)
		printk(KERN_INFO "cramfs: empty filesystem\n");
	else if (!(super.flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
		 ((root_offset != sizeof(struct cramfs_super)) &&
		  (root_offset != 512 + sizeof(struct cramfs_super))))
	{
		printk(KERN_ERR "cramfs: bad root offset %lu\n", root_offset);
		goto out;
	}

	/* Set it all up.. */
	sb->s_op = &cramfs_ops;
	root = get_cramfs_inode(sb, &super.root);
	if (!root)
		goto out;
	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);
		goto out;
	}
	return 0;
out:
	kfree(sbi);
	sb->s_fs_info = NULL;
	return -EINVAL;
}

static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;

	buf->f_type = CRAMFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_blocks = CRAMFS_SB(sb)->blocks;
	buf->f_bfree = 0;
	buf->f_bavail = 0;
	buf->f_files = CRAMFS_SB(sb)->files;
	buf->f_ffree = 0;
	buf->f_namelen = CRAMFS_MAXPATHLEN;
	return 0;
}

/*
 * Read a cramfs directory entry.
 */
static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	char *buf;
	unsigned int offset;
	int copied;

	/* Offset within the thing. */
	offset = filp->f_pos;
	if (offset >= inode->i_size)
		return 0;
	/* Directory entries are always 4-byte aligned */
	if (offset & 3)
		return -EINVAL;

	buf = kmalloc(CRAMFS_MAXPATHLEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	while (offset < inode->i_size) {
		struct cramfs_inode *de;
		unsigned long nextoffset;
		char *name;
		ino_t ino;
		mode_t mode;
		int namelen, error;

		mutex_lock(&read_mutex);
		de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de) + CRAMFS_MAXPATHLEN);
		name = (char *)(de + 1);

		/*
		 * Namelengths on disk are shifted by two
		 * and the name padded out to 4-byte boundaries
		 * with zeroes.
		 */
		namelen = de->namelen << 2;
		memcpy(buf, name, namelen);
		ino = CRAMINO(de);
		mode = de->mode;
		mutex_unlock(&read_mutex);
		nextoffset = offset + sizeof(*de) + namelen;
		for (;;) {
			if (!namelen) {
				kfree(buf);
				return -EIO;
			}
			if (buf[namelen - 1])
				break;
			namelen--;
		}
		error = filldir(dirent, buf, namelen, offset, ino, mode >> 12);
		if (error)
			break;

		offset = nextoffset;
		filp->f_pos = offset;
		copied++;
	}
	kfree(buf);
	return 0;
}

/*
 * Lookup and fill in the inode data..
 */
static struct dentry *cramfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
	unsigned int offset = 0;
	int sorted;

	mutex_lock(&read_mutex);
	sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
	while (offset < dir->i_size) {
		struct cramfs_inode *de;
		char *name;
		int namelen, retval;

		de = cramfs_read(dir->i_sb, OFFSET(dir) + offset, sizeof(*de) + CRAMFS_MAXPATHLEN);
		name = (char *)(de + 1);

		/* Try to take advantage of sorted directories */
		if (sorted && (dentry->d_name.name[0] < name[0]))
			break;

		namelen = de->namelen << 2;
		offset += sizeof(*de) + namelen;

		/* Quick check that the name is roughly the right length */
		if (((dentry->d_name.len + 3) & ~3) != namelen)
			continue;

		for (;;) {
			if (!namelen) {
				mutex_unlock(&read_mutex);
				return ERR_PTR(-EIO);
			}
			if (name[namelen - 1])
				break;
			namelen--;
		}
		if (namelen != dentry->d_name.len)
			continue;
		retval = memcmp(dentry->d_name.name, name, namelen);
		if (retval > 0)
			continue;
		if (!retval) {
			struct cramfs_inode entry = *de;
			mutex_unlock(&read_mutex);
			d_add(dentry, get_cramfs_inode(dir->i_sb, &entry));
			return NULL;
		}
		/* else (retval < 0) */
		if (sorted)
			break;
	}
	mutex_unlock(&read_mutex);
	d_add(dentry, NULL);
	return NULL;
}

static int cramfs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	u32 maxblock, bytes_filled;
	void *pgdata;

	maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	bytes_filled = 0;
	if (page->index < maxblock) {
		struct super_block *sb = inode->i_sb;
		u32 blkptr_offset = OFFSET(inode) + page->index * 4;
		u32 start_offset, compr_len;

		/*
		 * The block pointer table following the inode stores, for
		 * each block, the offset of the end of that block's
		 * compressed data.  Block 0 therefore starts right after
		 * the table; block i starts where block i-1 ends.
		 */
		start_offset = OFFSET(inode) + maxblock * 4;
		mutex_lock(&read_mutex);
		if (page->index)
			start_offset = *(u32 *) cramfs_read(sb, blkptr_offset - 4, 4);
		compr_len = (*(u32 *) cramfs_read(sb, blkptr_offset, 4) - start_offset);
		mutex_unlock(&read_mutex);
		pgdata = kmap(page);
		if (compr_len == 0)
			; /* hole */
		else if (compr_len > (PAGE_CACHE_SIZE << 1))
			printk(KERN_ERR "cramfs: bad compressed blocksize %u\n", compr_len);
		else {
			mutex_lock(&read_mutex);
			bytes_filled = cramfs_uncompress_block(pgdata,
				 PAGE_CACHE_SIZE,
				 cramfs_read(sb, start_offset, compr_len),
				 compr_len);
			mutex_unlock(&read_mutex);
		}
	} else
		pgdata = kmap(page);
	memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled);
	kunmap(page);
	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

static const struct address_space_operations cramfs_aops = {
	.readpage	= cramfs_readpage
};

/*
 * Our operations:
 */

/*
 * A directory can only readdir
 */
static const struct file_operations cramfs_directory_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= cramfs_readdir,
};

static const struct inode_operations cramfs_dir_inode_operations = {
	.lookup		= cramfs_lookup,
};

static const struct super_operations cramfs_ops = {
	.put_super	= cramfs_put_super,
	.remount_fs	= cramfs_remount,
	.statfs		= cramfs_statfs,
};

static int cramfs_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, cramfs_fill_super,
			   mnt);
}

static struct file_system_type cramfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "cramfs",
	.get_sb		= cramfs_get_sb,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};

static int __init init_cramfs_fs(void)
{
	int rv;

	rv = cramfs_uncompress_init();
	if (rv < 0)
		return rv;
	rv = register_filesystem(&cramfs_fs_type);
	if (rv < 0)
		cramfs_uncompress_exit();
	return rv;
}

static void __exit exit_cramfs_fs(void)
{
	cramfs_uncompress_exit();
	unregister_filesystem(&cramfs_fs_type);
}

module_init(init_cramfs_fs)
module_exit(exit_cramfs_fs)
MODULE_LICENSE("GPL");
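/*
 * Editorial usage note (illustrative only, not part of the kernel
 * build): a cramfs image is typically generated in userspace with
 * mkcramfs and mounted read-only.  A minimal sketch, assuming loop
 * device support is available:
 *
 *	mkcramfs rootdir cramfs.img
 *	mount -o loop -t cramfs cramfs.img /mnt
 *
 * "cramfs" is the filesystem type name registered above and is what
 * mount(8) uses to select this driver.
 */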