xref: /openbmc/linux/fs/jffs2/fs.c (revision ced22070)
1 /*
2  * JFFS2 -- Journalling Flash File System, Version 2.
3  *
4  * Copyright © 2001-2007 Red Hat, Inc.
5  *
6  * Created by David Woodhouse <dwmw2@infradead.org>
7  *
8  * For licensing information, see the file 'LICENCE' in this directory.
9  *
10  */
11 
12 #include <linux/capability.h>
13 #include <linux/kernel.h>
14 #include <linux/sched.h>
15 #include <linux/fs.h>
16 #include <linux/list.h>
17 #include <linux/mtd/mtd.h>
18 #include <linux/pagemap.h>
19 #include <linux/slab.h>
20 #include <linux/vmalloc.h>
21 #include <linux/vfs.h>
22 #include <linux/crc32.h>
23 #include "nodelist.h"
24 
25 static int jffs2_flash_setup(struct jffs2_sb_info *c);
26 
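/* Write the changed attributes out as a brand new raw inode node. JFFS2 is
   log-structured, so an attribute change means logging a fresh node (carrying
   the symlink target or device numbers as payload where those exist) and then
   obsoleting the old metadata node. */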
27 int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
28 {
29 	struct jffs2_full_dnode *old_metadata, *new_metadata;
30 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
31 	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
32 	struct jffs2_raw_inode *ri;
33 	union jffs2_device_node dev;
34 	unsigned char *mdata = NULL;
35 	int mdatalen = 0;
36 	unsigned int ivalid;
37 	uint32_t alloclen;
38 	int ret;
39 	int alloc_type = ALLOC_NORMAL;
40 
41 	D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
42 
43 	/* Special cases - we don't want more than one data node
44 	   for these types on the medium at any time. So setattr
45 	   must read the original data associated with the node
46 	   (i.e. the device numbers or the target name) and write
47 	   it out again with the appropriate data attached */
48 	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
49 		/* For these, we don't actually need to read the old node */
50 		mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
51 		mdata = (char *)&dev;
52 		D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
53 	} else if (S_ISLNK(inode->i_mode)) {
54 		mutex_lock(&f->sem);
55 		mdatalen = f->metadata->size;
56 		mdata = kmalloc(f->metadata->size, GFP_USER);
57 		if (!mdata) {
58 			mutex_unlock(&f->sem);
59 			return -ENOMEM;
60 		}
61 		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
62 		if (ret) {
63 			mutex_unlock(&f->sem);
64 			kfree(mdata);
65 			return ret;
66 		}
67 		mutex_unlock(&f->sem);
68 		D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
69 	}
70 
71 	ri = jffs2_alloc_raw_inode();
72 	if (!ri) {
73 		if (S_ISLNK(inode->i_mode))
74 			kfree(mdata);
75 		return -ENOMEM;
76 	}
77 
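	/* Reserve flash space for the replacement inode node plus any
	   symlink/device payload before we take f->sem */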
78 	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
79 				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
80 	if (ret) {
81 		jffs2_free_raw_inode(ri);
82 		if (S_ISLNK(inode->i_mode))
83 			kfree(mdata);
84 		return ret;
85 	}
86 	mutex_lock(&f->sem);
87 	ivalid = iattr->ia_valid;
88 
89 	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
90 	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
91 	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
92 	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
93 
94 	ri->ino = cpu_to_je32(inode->i_ino);
95 	ri->version = cpu_to_je32(++f->highest_version);
96 
97 	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?iattr->ia_uid:inode->i_uid);
98 	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid);
99 
100 	if (ivalid & ATTR_MODE)
101 		ri->mode = cpu_to_jemode(iattr->ia_mode);
102 	else
103 		ri->mode = cpu_to_jemode(inode->i_mode);
104 
105 
106 	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
107 	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
108 	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
109 	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));
110 
111 	ri->offset = cpu_to_je32(0);
112 	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
113 	ri->compr = JFFS2_COMPR_NONE;
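	/* Growing the file: write the new node as a hole covering the extended
	   range. JFFS2_COMPR_ZERO means that range reads back as zeroes without
	   any data being stored on the medium. */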
114 	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
115 		/* It's an extension. Make it a hole node */
116 		ri->compr = JFFS2_COMPR_ZERO;
117 		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
118 		ri->offset = cpu_to_je32(inode->i_size);
119 	} else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
120 		/* For truncate-to-zero, treat it as deletion because
121 		   it'll always be obsoleting all previous nodes */
122 		alloc_type = ALLOC_DELETION;
123 	}
124 	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
125 	if (mdatalen)
126 		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
127 	else
128 		ri->data_crc = cpu_to_je32(0);
129 
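	/* Commit the replacement node to flash. For truncate-to-zero it goes
	   out as ALLOC_DELETION, since it obsoletes every earlier data node
	   for this inode (see above). */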
130 	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
131 	if (S_ISLNK(inode->i_mode))
132 		kfree(mdata);
133 
134 	if (IS_ERR(new_metadata)) {
135 		jffs2_complete_reservation(c);
136 		jffs2_free_raw_inode(ri);
137 		mutex_unlock(&f->sem);
138 		return PTR_ERR(new_metadata);
139 	}
140 	/* It worked. Update the inode */
141 	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
142 	inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
143 	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
144 	inode->i_mode = jemode_to_cpu(ri->mode);
145 	inode->i_uid = je16_to_cpu(ri->uid);
146 	inode->i_gid = je16_to_cpu(ri->gid);
147 
148 
149 	old_metadata = f->metadata;
150 
151 	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
152 		jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);
153 
154 	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
155 		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
156 		inode->i_size = iattr->ia_size;
157 		inode->i_blocks = (inode->i_size + 511) >> 9;
158 		f->metadata = NULL;
159 	} else {
160 		f->metadata = new_metadata;
161 	}
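	/* The previous metadata node, if any, is now superseded; mark it
	   obsolete so the garbage collector can reclaim the space */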
162 	if (old_metadata) {
163 		jffs2_mark_node_obsolete(c, old_metadata->raw);
164 		jffs2_free_full_dnode(old_metadata);
165 	}
166 	jffs2_free_raw_inode(ri);
167 
168 	mutex_unlock(&f->sem);
169 	jffs2_complete_reservation(c);
170 
171 	/* We have to do the vmtruncate() without f->sem held, since
172 	   some pages may be locked and waiting for it in readpage().
173 	   We are protected from a simultaneous write() extending i_size
174 	   back past iattr->ia_size, because do_truncate() holds the
175 	   generic inode semaphore. */
176 	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
177 		vmtruncate(inode, iattr->ia_size);
178 		inode->i_blocks = (inode->i_size + 511) >> 9;
179 	}
180 
181 	return 0;
182 }
183 
184 int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
185 {
186 	int rc;
187 
188 	rc = inode_change_ok(dentry->d_inode, iattr);
189 	if (rc)
190 		return rc;
191 
192 	rc = jffs2_do_setattr(dentry->d_inode, iattr);
193 	if (!rc && (iattr->ia_valid & ATTR_MODE))
194 		rc = jffs2_acl_chmod(dentry->d_inode);
195 
196 	return rc;
197 }
198 
199 int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
200 {
201 	struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
202 	unsigned long avail;
203 
204 	buf->f_type = JFFS2_SUPER_MAGIC;
205 	buf->f_bsize = 1 << PAGE_SHIFT;
206 	buf->f_blocks = c->flash_size >> PAGE_SHIFT;
207 	buf->f_files = 0;
208 	buf->f_ffree = 0;
209 	buf->f_namelen = JFFS2_MAX_NAME_LEN;
210 
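	/* "Available" space is dirty + free space minus the erase blocks kept
	   in reserve for normal writes, reported in PAGE_SIZE units */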
211 	spin_lock(&c->erase_completion_lock);
212 	avail = c->dirty_size + c->free_size;
213 	if (avail > c->sector_size * c->resv_blocks_write)
214 		avail -= c->sector_size * c->resv_blocks_write;
215 	else
216 		avail = 0;
217 	spin_unlock(&c->erase_completion_lock);
218 
219 	buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;
220 
221 	return 0;
222 }
223 
224 
225 void jffs2_clear_inode (struct inode *inode)
226 {
227 	/* We can forget about this inode for now - drop all
228 	 *  the nodelists associated with it, etc.
229 	 */
230 	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
231 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
232 
233 	D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
234 	jffs2_do_clear_inode(c, f);
235 }
236 
237 struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
238 {
239 	struct jffs2_inode_info *f;
240 	struct jffs2_sb_info *c;
241 	struct jffs2_raw_inode latest_node;
242 	union jffs2_device_node jdev;
243 	struct inode *inode;
244 	dev_t rdev = 0;
245 	int ret;
246 
247 	D1(printk(KERN_DEBUG "jffs2_iget(): ino == %lu\n", ino));
248 
249 	inode = iget_locked(sb, ino);
250 	if (!inode)
251 		return ERR_PTR(-ENOMEM);
252 	if (!(inode->i_state & I_NEW))
253 		return inode;
254 
255 	f = JFFS2_INODE_INFO(inode);
256 	c = JFFS2_SB_INFO(inode->i_sb);
257 
258 	jffs2_init_inode_info(f);
259 	mutex_lock(&f->sem);
260 
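	/* Scan this inode's node chain and build its fragment tree; the most
	   recent raw inode comes back in latest_node with the current
	   attributes */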
261 	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
262 
263 	if (ret) {
264 		mutex_unlock(&f->sem);
265 		iget_failed(inode);
266 		return ERR_PTR(ret);
267 	}
268 	inode->i_mode = jemode_to_cpu(latest_node.mode);
269 	inode->i_uid = je16_to_cpu(latest_node.uid);
270 	inode->i_gid = je16_to_cpu(latest_node.gid);
271 	inode->i_size = je32_to_cpu(latest_node.isize);
272 	inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
273 	inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
274 	inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));
275 
276 	inode->i_nlink = f->inocache->nlink;
277 
278 	inode->i_blocks = (inode->i_size + 511) >> 9;
279 
280 	switch (inode->i_mode & S_IFMT) {
281 
282 	case S_IFLNK:
283 		inode->i_op = &jffs2_symlink_inode_operations;
284 		break;
285 
286 	case S_IFDIR:
287 	{
288 		struct jffs2_full_dirent *fd;
289 
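		/* Each child directory contributes one extra link via its
		   '..' entry pointing back at us */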
290 		for (fd=f->dents; fd; fd = fd->next) {
291 			if (fd->type == DT_DIR && fd->ino)
292 				inc_nlink(inode);
293 		}
294 		/* and '..' */
295 		inc_nlink(inode);
296 		/* Root dir gets i_nlink 3 for some reason */
297 		if (inode->i_ino == 1)
298 			inc_nlink(inode);
299 
300 		inode->i_op = &jffs2_dir_inode_operations;
301 		inode->i_fop = &jffs2_dir_operations;
302 		break;
303 	}
304 	case S_IFREG:
305 		inode->i_op = &jffs2_file_inode_operations;
306 		inode->i_fop = &jffs2_file_operations;
307 		inode->i_mapping->a_ops = &jffs2_file_address_operations;
308 		inode->i_mapping->nrpages = 0;
309 		break;
310 
311 	case S_IFBLK:
312 	case S_IFCHR:
313 		/* Read the device numbers from the media */
314 		if (f->metadata->size != sizeof(jdev.old) &&
315 		    f->metadata->size != sizeof(jdev.new)) {
316 			printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size);
317 			goto error_io;
318 		}
319 		D1(printk(KERN_DEBUG "Reading device numbers from flash\n"));
320 		ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
321 		if (ret < 0) {
322 			/* Eep */
323 			printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
324 			goto error;
325 		}
326 		if (f->metadata->size == sizeof(jdev.old))
327 			rdev = old_decode_dev(je16_to_cpu(jdev.old));
328 		else
329 			rdev = new_decode_dev(je32_to_cpu(jdev.new));
330 
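		/* fall through: FIFOs and sockets share the special-inode
		   setup below, with rdev left at zero */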
331 	case S_IFSOCK:
332 	case S_IFIFO:
333 		inode->i_op = &jffs2_file_inode_operations;
334 		init_special_inode(inode, inode->i_mode, rdev);
335 		break;
336 
337 	default:
338 		printk(KERN_WARNING "jffs2_iget(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino);
339 	}
340 
341 	mutex_unlock(&f->sem);
342 
343 	D1(printk(KERN_DEBUG "jffs2_iget() returning\n"));
344 	unlock_new_inode(inode);
345 	return inode;
346 
347 error_io:
348 	ret = -EIO;
349 error:
350 	mutex_unlock(&f->sem);
351 	jffs2_do_clear_inode(c, f);
352 	iget_failed(inode);
353 	return ERR_PTR(ret);
354 }
355 
356 void jffs2_dirty_inode(struct inode *inode)
357 {
358 	struct iattr iattr;
359 
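	/* Only attribute changes which the VFS flagged I_DIRTY_DATASYNC get a
	   new inode node written here; anything else is skipped */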
360 	if (!(inode->i_state & I_DIRTY_DATASYNC)) {
361 		D2(printk(KERN_DEBUG "jffs2_dirty_inode() not calling setattr() for ino #%lu\n", inode->i_ino));
362 		return;
363 	}
364 
365 	D1(printk(KERN_DEBUG "jffs2_dirty_inode() calling setattr() for ino #%lu\n", inode->i_ino));
366 
367 	iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
368 	iattr.ia_mode = inode->i_mode;
369 	iattr.ia_uid = inode->i_uid;
370 	iattr.ia_gid = inode->i_gid;
371 	iattr.ia_atime = inode->i_atime;
372 	iattr.ia_mtime = inode->i_mtime;
373 	iattr.ia_ctime = inode->i_ctime;
374 
375 	jffs2_do_setattr(inode, &iattr);
376 }
377 
378 int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
379 {
380 	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
381 
382 	if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
383 		return -EROFS;
384 
385 	/* We stop the GC thread if it was running, then restart it if it
386 	   needs to. This also catches the case where it was stopped and
387 	   this is just a remount to restart it.
388 	   Flush the writebuffer, if necessary, otherwise we lose it */
389 	if (!(sb->s_flags & MS_RDONLY)) {
390 		jffs2_stop_garbage_collect_thread(c);
391 		mutex_lock(&c->alloc_sem);
392 		jffs2_flush_wbuf_pad(c);
393 		mutex_unlock(&c->alloc_sem);
394 	}
395 
396 	if (!(*flags & MS_RDONLY))
397 		jffs2_start_garbage_collect_thread(c);
398 
399 	*flags |= MS_NOATIME;
400 
401 	return 0;
402 }
403 
404 void jffs2_write_super (struct super_block *sb)
405 {
406 	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
407 	sb->s_dirt = 0;
408 
409 	if (sb->s_flags & MS_RDONLY)
410 		return;
411 
412 	D1(printk(KERN_DEBUG "jffs2_write_super()\n"));
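	/* Nudge the background machinery: trigger GC, erase any blocks queued
	   for erasure and flush the write buffer */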
413 	jffs2_garbage_collect_trigger(c);
414 	jffs2_erase_pending_blocks(c, 0);
415 	jffs2_flush_wbuf_gc(c, 0);
416 }
417 
418 
419 /* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
420    fill in the raw_inode while you're at it. */
421 struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_inode *ri)
422 {
423 	struct inode *inode;
424 	struct super_block *sb = dir_i->i_sb;
425 	struct jffs2_sb_info *c;
426 	struct jffs2_inode_info *f;
427 	int ret;
428 
429 	D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode));
430 
431 	c = JFFS2_SB_INFO(sb);
432 
433 	inode = new_inode(sb);
434 
435 	if (!inode)
436 		return ERR_PTR(-ENOMEM);
437 
438 	f = JFFS2_INODE_INFO(inode);
439 	jffs2_init_inode_info(f);
440 	mutex_lock(&f->sem);
441 
442 	memset(ri, 0, sizeof(*ri));
443 	/* Set OS-specific defaults for new inodes */
444 	ri->uid = cpu_to_je16(current->fsuid);
445 
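	/* BSD-style group inheritance: a setgid parent directory passes its
	   group on (and new subdirectories keep the setgid bit); otherwise the
	   new inode takes the caller's fsgid */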
446 	if (dir_i->i_mode & S_ISGID) {
447 		ri->gid = cpu_to_je16(dir_i->i_gid);
448 		if (S_ISDIR(mode))
449 			mode |= S_ISGID;
450 	} else {
451 		ri->gid = cpu_to_je16(current->fsgid);
452 	}
453 
454 	/* POSIX ACLs have to be processed now, at least partly.
455 	   The umask is only applied if there's no default ACL */
456 	ret = jffs2_init_acl_pre(dir_i, inode, &mode);
457 	if (ret) {
458 		make_bad_inode(inode);
459 		iput(inode);
460 		return ERR_PTR(ret);
461 	}
462 	ret = jffs2_do_new_inode (c, f, mode, ri);
463 	if (ret) {
464 		make_bad_inode(inode);
465 		iput(inode);
466 		return ERR_PTR(ret);
467 	}
468 	inode->i_nlink = 1;
469 	inode->i_ino = je32_to_cpu(ri->ino);
470 	inode->i_mode = jemode_to_cpu(ri->mode);
471 	inode->i_gid = je16_to_cpu(ri->gid);
472 	inode->i_uid = je16_to_cpu(ri->uid);
473 	inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
474 	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));
475 
476 	inode->i_blocks = 0;
477 	inode->i_size = 0;
478 
479 	insert_inode_hash(inode);
480 
481 	return inode;
482 }
483 
484 
485 int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
486 {
487 	struct jffs2_sb_info *c;
488 	struct inode *root_i;
489 	int ret;
490 	size_t blocks;
491 
492 	c = JFFS2_SB_INFO(sb);
493 
494 #ifndef CONFIG_JFFS2_FS_WRITEBUFFER
495 	if (c->mtd->type == MTD_NANDFLASH) {
496 		printk(KERN_ERR "jffs2: Cannot operate on NAND flash unless jffs2 NAND support is compiled in.\n");
497 		return -EINVAL;
498 	}
499 	if (c->mtd->type == MTD_DATAFLASH) {
500 		printk(KERN_ERR "jffs2: Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in.\n");
501 		return -EINVAL;
502 	}
503 #endif
504 
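	/* Take the geometry from the underlying MTD device: total flash size
	   and erase block ("sector") size */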
505 	c->flash_size = c->mtd->size;
506 	c->sector_size = c->mtd->erasesize;
507 	blocks = c->flash_size / c->sector_size;
508 
509 	/*
510 	 * Size alignment check
511 	 */
512 	if ((c->sector_size * blocks) != c->flash_size) {
513 		c->flash_size = c->sector_size * blocks;
514 		printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n",
515 			c->flash_size / 1024);
516 	}
517 
518 	if (c->flash_size < 5*c->sector_size) {
519 		printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size);
520 		return -EINVAL;
521 	}
522 
523 	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);
524 
525 	/* NAND (or other bizarre) flash... do setup accordingly */
526 	ret = jffs2_flash_setup(c);
527 	if (ret)
528 		return ret;
529 
530 	c->inocache_list = kcalloc(INOCACHE_HASHSIZE, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
531 	if (!c->inocache_list) {
532 		ret = -ENOMEM;
533 		goto out_wbuf;
534 	}
535 
536 	jffs2_init_xattr_subsystem(c);
537 
538 	if ((ret = jffs2_do_mount_fs(c)))
539 		goto out_inohash;
540 
541 	D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n"));
542 	root_i = jffs2_iget(sb, 1);
543 	if (IS_ERR(root_i)) {
544 		D1(printk(KERN_WARNING "get root inode failed\n"));
545 		ret = PTR_ERR(root_i);
546 		goto out_root;
547 	}
548 
549 	ret = -ENOMEM;
550 
551 	D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n"));
552 	sb->s_root = d_alloc_root(root_i);
553 	if (!sb->s_root)
554 		goto out_root_i;
555 
556 	sb->s_maxbytes = 0xFFFFFFFF;
557 	sb->s_blocksize = PAGE_CACHE_SIZE;
558 	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
559 	sb->s_magic = JFFS2_SUPER_MAGIC;
560 	if (!(sb->s_flags & MS_RDONLY))
561 		jffs2_start_garbage_collect_thread(c);
562 	return 0;
563 
564  out_root_i:
565 	iput(root_i);
566 out_root:
567 	jffs2_free_ino_caches(c);
568 	jffs2_free_raw_node_refs(c);
569 	if (jffs2_blocks_use_vmalloc(c))
570 		vfree(c->blocks);
571 	else
572 		kfree(c->blocks);
573  out_inohash:
574 	jffs2_clear_xattr_subsystem(c);
575 	kfree(c->inocache_list);
576  out_wbuf:
577 	jffs2_flash_cleanup(c);
578 
579 	return ret;
580 }
581 
582 void jffs2_gc_release_inode(struct jffs2_sb_info *c,
583 				   struct jffs2_inode_info *f)
584 {
585 	iput(OFNI_EDONI_2SFFJ(f));
586 }
587 
588 struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
589 						     int inum, int nlink)
590 {
591 	struct inode *inode;
592 	struct jffs2_inode_cache *ic;
593 	if (!nlink) {
594 		/* The inode has zero nlink but its nodes weren't yet marked
595 		   obsolete. This has to be because we're still waiting for
596 		   the final (close() and) iput() to happen.
597 
598 		   There's a possibility that the final iput() could have
599 		   happened while we were contemplating. In order to ensure
600 		   that we don't cause a new read_inode() (which would fail)
601 		   for the inode in question, we use ilookup() in this case
602 		   instead of iget().
603 
604 		   The nlink can't _become_ zero at this point because we're
605 		   holding the alloc_sem, and jffs2_do_unlink() would also
606 		   need that while decrementing nlink on any inode.
607 		*/
608 		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
609 		if (!inode) {
610 			D1(printk(KERN_DEBUG "ilookup() failed for ino #%u; inode is probably deleted.\n",
611 				  inum));
612 
613 			spin_lock(&c->inocache_lock);
614 			ic = jffs2_get_ino_cache(c, inum);
615 			if (!ic) {
616 				D1(printk(KERN_DEBUG "Inode cache for ino #%u is gone.\n", inum));
617 				spin_unlock(&c->inocache_lock);
618 				return NULL;
619 			}
620 			if (ic->state != INO_STATE_CHECKEDABSENT) {
621 				/* Wait for progress. Don't just loop */
622 				D1(printk(KERN_DEBUG "Waiting for ino #%u in state %d\n",
623 					  ic->ino, ic->state));
624 				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
625 			} else {
626 				spin_unlock(&c->inocache_lock);
627 			}
628 
629 			return NULL;
630 		}
631 	} else {
632 		/* Inode has links to it still; they're not going away because
633 		   jffs2_do_unlink() would need the alloc_sem and we have it.
634 		   Just iget() it, and if read_inode() is necessary that's OK.
635 		*/
636 		inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
637 		if (IS_ERR(inode))
638 			return ERR_CAST(inode);
639 	}
640 	if (is_bad_inode(inode)) {
641 		printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. nlink %d\n",
642 		       inum, nlink);
643 		/* NB. This will happen again. We need to do something appropriate here. */
644 		iput(inode);
645 		return ERR_PTR(-EIO);
646 	}
647 
648 	return JFFS2_INODE_INFO(inode);
649 }
650 
651 unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
652 				   struct jffs2_inode_info *f,
653 				   unsigned long offset,
654 				   unsigned long *priv)
655 {
656 	struct inode *inode = OFNI_EDONI_2SFFJ(f);
657 	struct page *pg;
658 
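	/* Pull the page in through the page cache (reading from flash via
	   jffs2_do_readpage_unlock() if necessary), kmap it for the GC code
	   and stash the struct page in *priv so jffs2_gc_release_page() can
	   unmap and release it later */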
659 	pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
660 			     (void *)jffs2_do_readpage_unlock, inode);
661 	if (IS_ERR(pg))
662 		return (void *)pg;
663 
664 	*priv = (unsigned long)pg;
665 	return kmap(pg);
666 }
667 
668 void jffs2_gc_release_page(struct jffs2_sb_info *c,
669 			   unsigned char *ptr,
670 			   unsigned long *priv)
671 {
672 	struct page *pg = (void *)*priv;
673 
674 	kunmap(pg);
675 	page_cache_release(pg);
676 }
677 
678 static int jffs2_flash_setup(struct jffs2_sb_info *c) {
679 	int ret = 0;
680 
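	/* Only write-buffered flash types need extra setup here; plain NOR
	   with no write buffer needs nothing */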
681 	if (jffs2_cleanmarker_oob(c)) {
682 		/* NAND flash... do setup accordingly */
683 		ret = jffs2_nand_flash_setup(c);
684 		if (ret)
685 			return ret;
686 	}
687 
688 	/* and Dataflash */
689 	if (jffs2_dataflash(c)) {
690 		ret = jffs2_dataflash_setup(c);
691 		if (ret)
692 			return ret;
693 	}
694 
695 	/* and Intel "Sibley" flash */
696 	if (jffs2_nor_wbuf_flash(c)) {
697 		ret = jffs2_nor_wbuf_flash_setup(c);
698 		if (ret)
699 			return ret;
700 	}
701 
702 	/* and a UBI volume */
703 	if (jffs2_ubivol(c)) {
704 		ret = jffs2_ubivol_setup(c);
705 		if (ret)
706 			return ret;
707 	}
708 
709 	return ret;
710 }
711 
712 void jffs2_flash_cleanup(struct jffs2_sb_info *c) {
713 
714 	if (jffs2_cleanmarker_oob(c)) {
715 		jffs2_nand_flash_cleanup(c);
716 	}
717 
718 	/* and DataFlash */
719 	if (jffs2_dataflash(c)) {
720 		jffs2_dataflash_cleanup(c);
721 	}
722 
723 	/* and Intel "Sibley" flash */
724 	if (jffs2_nor_wbuf_flash(c)) {
725 		jffs2_nor_wbuf_flash_cleanup(c);
726 	}
727 
728 	/* and a UBI volume */
729 	if (jffs2_ubivol(c)) {
730 		jffs2_ubivol_cleanup(c);
731 	}
732 }
733