1 /* 2 * JFFS2 -- Journalling Flash File System, Version 2. 3 * 4 * Copyright © 2001-2007 Red Hat, Inc. 5 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> 6 * 7 * Created by David Woodhouse <dwmw2@infradead.org> 8 * 9 * For licensing information, see the file 'LICENCE' in this directory. 10 * 11 */ 12 13 #include <linux/capability.h> 14 #include <linux/kernel.h> 15 #include <linux/sched.h> 16 #include <linux/fs.h> 17 #include <linux/list.h> 18 #include <linux/mtd/mtd.h> 19 #include <linux/pagemap.h> 20 #include <linux/slab.h> 21 #include <linux/vmalloc.h> 22 #include <linux/vfs.h> 23 #include <linux/crc32.h> 24 #include "nodelist.h" 25 26 static int jffs2_flash_setup(struct jffs2_sb_info *c); 27 28 int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) 29 { 30 struct jffs2_full_dnode *old_metadata, *new_metadata; 31 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 32 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); 33 struct jffs2_raw_inode *ri; 34 union jffs2_device_node dev; 35 unsigned char *mdata = NULL; 36 int mdatalen = 0; 37 unsigned int ivalid; 38 uint32_t alloclen; 39 int ret; 40 int alloc_type = ALLOC_NORMAL; 41 42 D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino)); 43 44 /* Special cases - we don't want more than one data node 45 for these types on the medium at any time. So setattr 46 must read the original data associated with the node 47 (i.e. the device numbers or the target name) and write 48 it out again with the appropriate data attached */ 49 if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { 50 /* For these, we don't actually need to read the old node */ 51 mdatalen = jffs2_encode_dev(&dev, inode->i_rdev); 52 mdata = (char *)&dev; 53 D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen)); 54 } else if (S_ISLNK(inode->i_mode)) { 55 mutex_lock(&f->sem); 56 mdatalen = f->metadata->size; 57 mdata = kmalloc(f->metadata->size, GFP_USER); 58 if (!mdata) { 59 mutex_unlock(&f->sem); 60 return -ENOMEM; 61 } 62 ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen); 63 if (ret) { 64 mutex_unlock(&f->sem); 65 kfree(mdata); 66 return ret; 67 } 68 mutex_unlock(&f->sem); 69 D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen)); 70 } 71 72 ri = jffs2_alloc_raw_inode(); 73 if (!ri) { 74 if (S_ISLNK(inode->i_mode)) 75 kfree(mdata); 76 return -ENOMEM; 77 } 78 79 ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen, 80 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); 81 if (ret) { 82 jffs2_free_raw_inode(ri); 83 if (S_ISLNK(inode->i_mode)) 84 kfree(mdata); 85 return ret; 86 } 87 mutex_lock(&f->sem); 88 ivalid = iattr->ia_valid; 89 90 ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); 91 ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE); 92 ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen); 93 ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)); 94 95 ri->ino = cpu_to_je32(inode->i_ino); 96 ri->version = cpu_to_je32(++f->highest_version); 97 98 ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid); 99 ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid); 100 101 if (ivalid & ATTR_MODE) 102 ri->mode = cpu_to_jemode(iattr->ia_mode); 103 else 104 ri->mode = cpu_to_jemode(inode->i_mode); 105 106 107 ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size); 108 ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime)); 109 ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime)); 110 ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime)); 111 112 ri->offset = cpu_to_je32(0); 113 ri->csize = ri->dsize = cpu_to_je32(mdatalen); 114 ri->compr = JFFS2_COMPR_NONE; 115 if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { 116 /* It's an extension. Make it a hole node */ 117 ri->compr = JFFS2_COMPR_ZERO; 118 ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size); 119 ri->offset = cpu_to_je32(inode->i_size); 120 } else if (ivalid & ATTR_SIZE && !iattr->ia_size) { 121 /* For truncate-to-zero, treat it as deletion because 122 it'll always be obsoleting all previous nodes */ 123 alloc_type = ALLOC_DELETION; 124 } 125 ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); 126 if (mdatalen) 127 ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen)); 128 else 129 ri->data_crc = cpu_to_je32(0); 130 131 new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type); 132 if (S_ISLNK(inode->i_mode)) 133 kfree(mdata); 134 135 if (IS_ERR(new_metadata)) { 136 jffs2_complete_reservation(c); 137 jffs2_free_raw_inode(ri); 138 mutex_unlock(&f->sem); 139 return PTR_ERR(new_metadata); 140 } 141 /* It worked. Update the inode */ 142 inode->i_atime = ITIME(je32_to_cpu(ri->atime)); 143 inode->i_ctime = ITIME(je32_to_cpu(ri->ctime)); 144 inode->i_mtime = ITIME(je32_to_cpu(ri->mtime)); 145 inode->i_mode = jemode_to_cpu(ri->mode); 146 inode->i_uid = je16_to_cpu(ri->uid); 147 inode->i_gid = je16_to_cpu(ri->gid); 148 149 150 old_metadata = f->metadata; 151 152 if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) 153 jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size); 154 155 if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) { 156 jffs2_add_full_dnode_to_inode(c, f, new_metadata); 157 inode->i_size = iattr->ia_size; 158 inode->i_blocks = (inode->i_size + 511) >> 9; 159 f->metadata = NULL; 160 } else { 161 f->metadata = new_metadata; 162 } 163 if (old_metadata) { 164 jffs2_mark_node_obsolete(c, old_metadata->raw); 165 jffs2_free_full_dnode(old_metadata); 166 } 167 jffs2_free_raw_inode(ri); 168 169 mutex_unlock(&f->sem); 170 jffs2_complete_reservation(c); 171 172 /* We have to do the truncate_setsize() without f->sem held, since 173 some pages may be locked and waiting for it in readpage(). 174 We are protected from a simultaneous write() extending i_size 175 back past iattr->ia_size, because do_truncate() holds the 176 generic inode semaphore. */ 177 if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) { 178 truncate_setsize(inode, iattr->ia_size); 179 inode->i_blocks = (inode->i_size + 511) >> 9; 180 } 181 182 return 0; 183 } 184 185 int jffs2_setattr(struct dentry *dentry, struct iattr *iattr) 186 { 187 int rc; 188 189 rc = inode_change_ok(dentry->d_inode, iattr); 190 if (rc) 191 return rc; 192 193 rc = jffs2_do_setattr(dentry->d_inode, iattr); 194 if (!rc && (iattr->ia_valid & ATTR_MODE)) 195 rc = jffs2_acl_chmod(dentry->d_inode); 196 197 return rc; 198 } 199 200 int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf) 201 { 202 struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb); 203 unsigned long avail; 204 205 buf->f_type = JFFS2_SUPER_MAGIC; 206 buf->f_bsize = 1 << PAGE_SHIFT; 207 buf->f_blocks = c->flash_size >> PAGE_SHIFT; 208 buf->f_files = 0; 209 buf->f_ffree = 0; 210 buf->f_namelen = JFFS2_MAX_NAME_LEN; 211 buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC; 212 buf->f_fsid.val[1] = c->mtd->index; 213 214 spin_lock(&c->erase_completion_lock); 215 avail = c->dirty_size + c->free_size; 216 if (avail > c->sector_size * c->resv_blocks_write) 217 avail -= c->sector_size * c->resv_blocks_write; 218 else 219 avail = 0; 220 spin_unlock(&c->erase_completion_lock); 221 222 buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT; 223 224 return 0; 225 } 226 227 228 void jffs2_evict_inode (struct inode *inode) 229 { 230 /* We can forget about this inode for now - drop all 231 * the nodelists associated with it, etc. 232 */ 233 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); 234 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 235 236 D1(printk(KERN_DEBUG "jffs2_evict_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode)); 237 truncate_inode_pages(&inode->i_data, 0); 238 end_writeback(inode); 239 jffs2_do_clear_inode(c, f); 240 } 241 242 struct inode *jffs2_iget(struct super_block *sb, unsigned long ino) 243 { 244 struct jffs2_inode_info *f; 245 struct jffs2_sb_info *c; 246 struct jffs2_raw_inode latest_node; 247 union jffs2_device_node jdev; 248 struct inode *inode; 249 dev_t rdev = 0; 250 int ret; 251 252 D1(printk(KERN_DEBUG "jffs2_iget(): ino == %lu\n", ino)); 253 254 inode = iget_locked(sb, ino); 255 if (!inode) 256 return ERR_PTR(-ENOMEM); 257 if (!(inode->i_state & I_NEW)) 258 return inode; 259 260 f = JFFS2_INODE_INFO(inode); 261 c = JFFS2_SB_INFO(inode->i_sb); 262 263 jffs2_init_inode_info(f); 264 mutex_lock(&f->sem); 265 266 ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node); 267 268 if (ret) { 269 mutex_unlock(&f->sem); 270 iget_failed(inode); 271 return ERR_PTR(ret); 272 } 273 inode->i_mode = jemode_to_cpu(latest_node.mode); 274 inode->i_uid = je16_to_cpu(latest_node.uid); 275 inode->i_gid = je16_to_cpu(latest_node.gid); 276 inode->i_size = je32_to_cpu(latest_node.isize); 277 inode->i_atime = ITIME(je32_to_cpu(latest_node.atime)); 278 inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime)); 279 inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime)); 280 281 inode->i_nlink = f->inocache->pino_nlink; 282 283 inode->i_blocks = (inode->i_size + 511) >> 9; 284 285 switch (inode->i_mode & S_IFMT) { 286 287 case S_IFLNK: 288 inode->i_op = &jffs2_symlink_inode_operations; 289 break; 290 291 case S_IFDIR: 292 { 293 struct jffs2_full_dirent *fd; 294 inode->i_nlink = 2; /* parent and '.' */ 295 296 for (fd=f->dents; fd; fd = fd->next) { 297 if (fd->type == DT_DIR && fd->ino) 298 inc_nlink(inode); 299 } 300 /* Root dir gets i_nlink 3 for some reason */ 301 if (inode->i_ino == 1) 302 inc_nlink(inode); 303 304 inode->i_op = &jffs2_dir_inode_operations; 305 inode->i_fop = &jffs2_dir_operations; 306 break; 307 } 308 case S_IFREG: 309 inode->i_op = &jffs2_file_inode_operations; 310 inode->i_fop = &jffs2_file_operations; 311 inode->i_mapping->a_ops = &jffs2_file_address_operations; 312 inode->i_mapping->nrpages = 0; 313 break; 314 315 case S_IFBLK: 316 case S_IFCHR: 317 /* Read the device numbers from the media */ 318 if (f->metadata->size != sizeof(jdev.old_id) && 319 f->metadata->size != sizeof(jdev.new_id)) { 320 printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size); 321 goto error_io; 322 } 323 D1(printk(KERN_DEBUG "Reading device numbers from flash\n")); 324 ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size); 325 if (ret < 0) { 326 /* Eep */ 327 printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino); 328 goto error; 329 } 330 if (f->metadata->size == sizeof(jdev.old_id)) 331 rdev = old_decode_dev(je16_to_cpu(jdev.old_id)); 332 else 333 rdev = new_decode_dev(je32_to_cpu(jdev.new_id)); 334 335 case S_IFSOCK: 336 case S_IFIFO: 337 inode->i_op = &jffs2_file_inode_operations; 338 init_special_inode(inode, inode->i_mode, rdev); 339 break; 340 341 default: 342 printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino); 343 } 344 345 mutex_unlock(&f->sem); 346 347 D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n")); 348 unlock_new_inode(inode); 349 return inode; 350 351 error_io: 352 ret = -EIO; 353 error: 354 mutex_unlock(&f->sem); 355 jffs2_do_clear_inode(c, f); 356 iget_failed(inode); 357 return ERR_PTR(ret); 358 } 359 360 void jffs2_dirty_inode(struct inode *inode, int flags) 361 { 362 struct iattr iattr; 363 364 if (!(inode->i_state & I_DIRTY_DATASYNC)) { 365 D2(printk(KERN_DEBUG "jffs2_dirty_inode() not calling setattr() for ino #%lu\n", inode->i_ino)); 366 return; 367 } 368 369 D1(printk(KERN_DEBUG "jffs2_dirty_inode() calling setattr() for ino #%lu\n", inode->i_ino)); 370 371 iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME; 372 iattr.ia_mode = inode->i_mode; 373 iattr.ia_uid = inode->i_uid; 374 iattr.ia_gid = inode->i_gid; 375 iattr.ia_atime = inode->i_atime; 376 iattr.ia_mtime = inode->i_mtime; 377 iattr.ia_ctime = inode->i_ctime; 378 379 jffs2_do_setattr(inode, &iattr); 380 } 381 382 int jffs2_remount_fs (struct super_block *sb, int *flags, char *data) 383 { 384 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); 385 386 if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY)) 387 return -EROFS; 388 389 /* We stop if it was running, then restart if it needs to. 390 This also catches the case where it was stopped and this 391 is just a remount to restart it. 392 Flush the writebuffer, if neccecary, else we loose it */ 393 if (!(sb->s_flags & MS_RDONLY)) { 394 jffs2_stop_garbage_collect_thread(c); 395 mutex_lock(&c->alloc_sem); 396 jffs2_flush_wbuf_pad(c); 397 mutex_unlock(&c->alloc_sem); 398 } 399 400 if (!(*flags & MS_RDONLY)) 401 jffs2_start_garbage_collect_thread(c); 402 403 *flags |= MS_NOATIME; 404 return 0; 405 } 406 407 /* jffs2_new_inode: allocate a new inode and inocache, add it to the hash, 408 fill in the raw_inode while you're at it. */ 409 struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri) 410 { 411 struct inode *inode; 412 struct super_block *sb = dir_i->i_sb; 413 struct jffs2_sb_info *c; 414 struct jffs2_inode_info *f; 415 int ret; 416 417 D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode)); 418 419 c = JFFS2_SB_INFO(sb); 420 421 inode = new_inode(sb); 422 423 if (!inode) 424 return ERR_PTR(-ENOMEM); 425 426 f = JFFS2_INODE_INFO(inode); 427 jffs2_init_inode_info(f); 428 mutex_lock(&f->sem); 429 430 memset(ri, 0, sizeof(*ri)); 431 /* Set OS-specific defaults for new inodes */ 432 ri->uid = cpu_to_je16(current_fsuid()); 433 434 if (dir_i->i_mode & S_ISGID) { 435 ri->gid = cpu_to_je16(dir_i->i_gid); 436 if (S_ISDIR(mode)) 437 mode |= S_ISGID; 438 } else { 439 ri->gid = cpu_to_je16(current_fsgid()); 440 } 441 442 /* POSIX ACLs have to be processed now, at least partly. 443 The umask is only applied if there's no default ACL */ 444 ret = jffs2_init_acl_pre(dir_i, inode, &mode); 445 if (ret) { 446 make_bad_inode(inode); 447 iput(inode); 448 return ERR_PTR(ret); 449 } 450 ret = jffs2_do_new_inode (c, f, mode, ri); 451 if (ret) { 452 make_bad_inode(inode); 453 iput(inode); 454 return ERR_PTR(ret); 455 } 456 inode->i_nlink = 1; 457 inode->i_ino = je32_to_cpu(ri->ino); 458 inode->i_mode = jemode_to_cpu(ri->mode); 459 inode->i_gid = je16_to_cpu(ri->gid); 460 inode->i_uid = je16_to_cpu(ri->uid); 461 inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC; 462 ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime)); 463 464 inode->i_blocks = 0; 465 inode->i_size = 0; 466 467 if (insert_inode_locked(inode) < 0) { 468 make_bad_inode(inode); 469 unlock_new_inode(inode); 470 iput(inode); 471 return ERR_PTR(-EINVAL); 472 } 473 474 return inode; 475 } 476 477 static int calculate_inocache_hashsize(uint32_t flash_size) 478 { 479 /* 480 * Pick a inocache hash size based on the size of the medium. 481 * Count how many megabytes we're dealing with, apply a hashsize twice 482 * that size, but rounding down to the usual big powers of 2. And keep 483 * to sensible bounds. 484 */ 485 486 int size_mb = flash_size / 1024 / 1024; 487 int hashsize = (size_mb * 2) & ~0x3f; 488 489 if (hashsize < INOCACHE_HASHSIZE_MIN) 490 return INOCACHE_HASHSIZE_MIN; 491 if (hashsize > INOCACHE_HASHSIZE_MAX) 492 return INOCACHE_HASHSIZE_MAX; 493 494 return hashsize; 495 } 496 497 int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) 498 { 499 struct jffs2_sb_info *c; 500 struct inode *root_i; 501 int ret; 502 size_t blocks; 503 504 c = JFFS2_SB_INFO(sb); 505 506 #ifndef CONFIG_JFFS2_FS_WRITEBUFFER 507 if (c->mtd->type == MTD_NANDFLASH) { 508 printk(KERN_ERR "jffs2: Cannot operate on NAND flash unless jffs2 NAND support is compiled in.\n"); 509 return -EINVAL; 510 } 511 if (c->mtd->type == MTD_DATAFLASH) { 512 printk(KERN_ERR "jffs2: Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in.\n"); 513 return -EINVAL; 514 } 515 #endif 516 517 c->flash_size = c->mtd->size; 518 c->sector_size = c->mtd->erasesize; 519 blocks = c->flash_size / c->sector_size; 520 521 /* 522 * Size alignment check 523 */ 524 if ((c->sector_size * blocks) != c->flash_size) { 525 c->flash_size = c->sector_size * blocks; 526 printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n", 527 c->flash_size / 1024); 528 } 529 530 if (c->flash_size < 5*c->sector_size) { 531 printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size); 532 return -EINVAL; 533 } 534 535 c->cleanmarker_size = sizeof(struct jffs2_unknown_node); 536 537 /* NAND (or other bizarre) flash... do setup accordingly */ 538 ret = jffs2_flash_setup(c); 539 if (ret) 540 return ret; 541 542 c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size); 543 c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL); 544 if (!c->inocache_list) { 545 ret = -ENOMEM; 546 goto out_wbuf; 547 } 548 549 jffs2_init_xattr_subsystem(c); 550 551 if ((ret = jffs2_do_mount_fs(c))) 552 goto out_inohash; 553 554 D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n")); 555 root_i = jffs2_iget(sb, 1); 556 if (IS_ERR(root_i)) { 557 D1(printk(KERN_WARNING "get root inode failed\n")); 558 ret = PTR_ERR(root_i); 559 goto out_root; 560 } 561 562 ret = -ENOMEM; 563 564 D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n")); 565 sb->s_root = d_alloc_root(root_i); 566 if (!sb->s_root) 567 goto out_root_i; 568 569 sb->s_maxbytes = 0xFFFFFFFF; 570 sb->s_blocksize = PAGE_CACHE_SIZE; 571 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 572 sb->s_magic = JFFS2_SUPER_MAGIC; 573 if (!(sb->s_flags & MS_RDONLY)) 574 jffs2_start_garbage_collect_thread(c); 575 return 0; 576 577 out_root_i: 578 iput(root_i); 579 out_root: 580 jffs2_free_ino_caches(c); 581 jffs2_free_raw_node_refs(c); 582 if (jffs2_blocks_use_vmalloc(c)) 583 vfree(c->blocks); 584 else 585 kfree(c->blocks); 586 out_inohash: 587 jffs2_clear_xattr_subsystem(c); 588 kfree(c->inocache_list); 589 out_wbuf: 590 jffs2_flash_cleanup(c); 591 592 return ret; 593 } 594 595 void jffs2_gc_release_inode(struct jffs2_sb_info *c, 596 struct jffs2_inode_info *f) 597 { 598 iput(OFNI_EDONI_2SFFJ(f)); 599 } 600 601 struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, 602 int inum, int unlinked) 603 { 604 struct inode *inode; 605 struct jffs2_inode_cache *ic; 606 607 if (unlinked) { 608 /* The inode has zero nlink but its nodes weren't yet marked 609 obsolete. This has to be because we're still waiting for 610 the final (close() and) iput() to happen. 611 612 There's a possibility that the final iput() could have 613 happened while we were contemplating. In order to ensure 614 that we don't cause a new read_inode() (which would fail) 615 for the inode in question, we use ilookup() in this case 616 instead of iget(). 617 618 The nlink can't _become_ zero at this point because we're 619 holding the alloc_sem, and jffs2_do_unlink() would also 620 need that while decrementing nlink on any inode. 621 */ 622 inode = ilookup(OFNI_BS_2SFFJ(c), inum); 623 if (!inode) { 624 D1(printk(KERN_DEBUG "ilookup() failed for ino #%u; inode is probably deleted.\n", 625 inum)); 626 627 spin_lock(&c->inocache_lock); 628 ic = jffs2_get_ino_cache(c, inum); 629 if (!ic) { 630 D1(printk(KERN_DEBUG "Inode cache for ino #%u is gone.\n", inum)); 631 spin_unlock(&c->inocache_lock); 632 return NULL; 633 } 634 if (ic->state != INO_STATE_CHECKEDABSENT) { 635 /* Wait for progress. Don't just loop */ 636 D1(printk(KERN_DEBUG "Waiting for ino #%u in state %d\n", 637 ic->ino, ic->state)); 638 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); 639 } else { 640 spin_unlock(&c->inocache_lock); 641 } 642 643 return NULL; 644 } 645 } else { 646 /* Inode has links to it still; they're not going away because 647 jffs2_do_unlink() would need the alloc_sem and we have it. 648 Just iget() it, and if read_inode() is necessary that's OK. 649 */ 650 inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum); 651 if (IS_ERR(inode)) 652 return ERR_CAST(inode); 653 } 654 if (is_bad_inode(inode)) { 655 printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. unlinked %d\n", 656 inum, unlinked); 657 /* NB. This will happen again. We need to do something appropriate here. */ 658 iput(inode); 659 return ERR_PTR(-EIO); 660 } 661 662 return JFFS2_INODE_INFO(inode); 663 } 664 665 unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, 666 struct jffs2_inode_info *f, 667 unsigned long offset, 668 unsigned long *priv) 669 { 670 struct inode *inode = OFNI_EDONI_2SFFJ(f); 671 struct page *pg; 672 673 pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, 674 (void *)jffs2_do_readpage_unlock, inode); 675 if (IS_ERR(pg)) 676 return (void *)pg; 677 678 *priv = (unsigned long)pg; 679 return kmap(pg); 680 } 681 682 void jffs2_gc_release_page(struct jffs2_sb_info *c, 683 unsigned char *ptr, 684 unsigned long *priv) 685 { 686 struct page *pg = (void *)*priv; 687 688 kunmap(pg); 689 page_cache_release(pg); 690 } 691 692 static int jffs2_flash_setup(struct jffs2_sb_info *c) { 693 int ret = 0; 694 695 if (jffs2_cleanmarker_oob(c)) { 696 /* NAND flash... do setup accordingly */ 697 ret = jffs2_nand_flash_setup(c); 698 if (ret) 699 return ret; 700 } 701 702 /* and Dataflash */ 703 if (jffs2_dataflash(c)) { 704 ret = jffs2_dataflash_setup(c); 705 if (ret) 706 return ret; 707 } 708 709 /* and Intel "Sibley" flash */ 710 if (jffs2_nor_wbuf_flash(c)) { 711 ret = jffs2_nor_wbuf_flash_setup(c); 712 if (ret) 713 return ret; 714 } 715 716 /* and an UBI volume */ 717 if (jffs2_ubivol(c)) { 718 ret = jffs2_ubivol_setup(c); 719 if (ret) 720 return ret; 721 } 722 723 return ret; 724 } 725 726 void jffs2_flash_cleanup(struct jffs2_sb_info *c) { 727 728 if (jffs2_cleanmarker_oob(c)) { 729 jffs2_nand_flash_cleanup(c); 730 } 731 732 /* and DataFlash */ 733 if (jffs2_dataflash(c)) { 734 jffs2_dataflash_cleanup(c); 735 } 736 737 /* and Intel "Sibley" flash */ 738 if (jffs2_nor_wbuf_flash(c)) { 739 jffs2_nor_wbuf_flash_cleanup(c); 740 } 741 742 /* and an UBI volume */ 743 if (jffs2_ubivol(c)) { 744 jffs2_ubivol_cleanup(c); 745 } 746 } 747