/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/vfs.h>
#include <linux/crc32.h>
#include "nodelist.h"

static int jffs2_flash_setup(struct jffs2_sb_info *c);

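/*
 * Write attribute changes out as a brand-new inode node.  JFFS2 is
 * log-structured, so a setattr is not an in-place update: a fresh
 * jffs2_raw_inode (plus the device numbers or symlink target, where
 * those exist) is written with a higher version number, and the old
 * metadata node is then marked obsolete for the garbage collector.
 */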
int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
{
	struct jffs2_full_dnode *old_metadata, *new_metadata;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	union jffs2_device_node dev;
	unsigned char *mdata = NULL;
	int mdatalen = 0;
	unsigned int ivalid;
	uint32_t alloclen;
	int ret;
	int alloc_type = ALLOC_NORMAL;

	jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);

	/* Special cases - we don't want more than one data node
	   for these types on the medium at any time. So setattr
	   must read the original data associated with the node
	   (i.e. the device numbers or the target name) and write
	   it out again with the appropriate data attached */
	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
		/* For these, we don't actually need to read the old node */
		mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
		mdata = (char *)&dev;
		jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
			  __func__, mdatalen);
	} else if (S_ISLNK(inode->i_mode)) {
		mutex_lock(&f->sem);
		mdatalen = f->metadata->size;
		mdata = kmalloc(f->metadata->size, GFP_USER);
		if (!mdata) {
			mutex_unlock(&f->sem);
			return -ENOMEM;
		}
		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
		if (ret) {
			mutex_unlock(&f->sem);
			kfree(mdata);
			return ret;
		}
		mutex_unlock(&f->sem);
		jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
			  __func__, mdatalen);
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri) {
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return -ENOMEM;
	}

	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		jffs2_free_raw_inode(ri);
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return ret;
	}
	mutex_lock(&f->sem);
	ivalid = iattr->ia_valid;

	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->ino = cpu_to_je32(inode->i_ino);
	ri->version = cpu_to_je32(++f->highest_version);

	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?
		from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode));
	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?
		from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode));

	if (ivalid & ATTR_MODE)
		ri->mode = cpu_to_jemode(iattr->ia_mode);
	else
		ri->mode = cpu_to_jemode(inode->i_mode);

	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode_get_ctime(inode)));

	ri->offset = cpu_to_je32(0);
	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
	ri->compr = JFFS2_COMPR_NONE;
	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		/* It's an extension. Make it a hole node */
		ri->compr = JFFS2_COMPR_ZERO;
		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
		ri->offset = cpu_to_je32(inode->i_size);
	} else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
		/* For truncate-to-zero, treat it as deletion because
		   it'll always be obsoleting all previous nodes */
		alloc_type = ALLOC_DELETION;
	}
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
	if (mdatalen)
		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
	else
		ri->data_crc = cpu_to_je32(0);

	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
	if (S_ISLNK(inode->i_mode))
		kfree(mdata);

	if (IS_ERR(new_metadata)) {
		jffs2_complete_reservation(c);
		jffs2_free_raw_inode(ri);
		mutex_unlock(&f->sem);
		return PTR_ERR(new_metadata);
	}
	/* It worked. Update the inode */
	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
	inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(ri->ctime)));
	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_uid_write(inode, je16_to_cpu(ri->uid));
	i_gid_write(inode, je16_to_cpu(ri->gid));

	old_metadata = f->metadata;

	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
		jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);

	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
		inode->i_size = iattr->ia_size;
		inode->i_blocks = (inode->i_size + 511) >> 9;
		f->metadata = NULL;
	} else {
		f->metadata = new_metadata;
	}
	if (old_metadata) {
		jffs2_mark_node_obsolete(c, old_metadata->raw);
		jffs2_free_full_dnode(old_metadata);
	}
	jffs2_free_raw_inode(ri);

	mutex_unlock(&f->sem);
	jffs2_complete_reservation(c);

	/* We have to do the truncate_setsize() without f->sem held, since
	   some pages may be locked and waiting for it in read_folio().
	   We are protected from a simultaneous write() extending i_size
	   back past iattr->ia_size, because do_truncate() holds the
	   generic inode semaphore. */
	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
		truncate_setsize(inode, iattr->ia_size);
		inode->i_blocks = (inode->i_size + 511) >> 9;
	}

	return 0;
}

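/*
 * VFS ->setattr entry point: validate the request with setattr_prepare(),
 * write the change via jffs2_do_setattr(), and propagate any mode change
 * to the POSIX ACLs with posix_acl_chmod().
 */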
int jffs2_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		  struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int rc;

	rc = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
	if (rc)
		return rc;

	rc = jffs2_do_setattr(inode, iattr);
	if (!rc && (iattr->ia_valid & ATTR_MODE))
		rc = posix_acl_chmod(&nop_mnt_idmap, dentry, inode->i_mode);

	return rc;
}

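/*
 * Space reported to statfs() is the dirty + free space on the medium,
 * minus the erase blocks reserved for writes (resv_blocks_write),
 * converted from bytes to PAGE_SIZE-sized blocks.  Dirty space is
 * included because garbage collection can reclaim it.
 */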
int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
	unsigned long avail;

	buf->f_type = JFFS2_SUPER_MAGIC;
	buf->f_bsize = 1 << PAGE_SHIFT;
	buf->f_blocks = c->flash_size >> PAGE_SHIFT;
	buf->f_files = 0;
	buf->f_ffree = 0;
	buf->f_namelen = JFFS2_MAX_NAME_LEN;
	buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
	buf->f_fsid.val[1] = c->mtd->index;

	spin_lock(&c->erase_completion_lock);
	avail = c->dirty_size + c->free_size;
	if (avail > c->sector_size * c->resv_blocks_write)
		avail -= c->sector_size * c->resv_blocks_write;
	else
		avail = 0;
	spin_unlock(&c->erase_completion_lock);

	buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;

	return 0;
}

void jffs2_evict_inode (struct inode *inode)
{
	/* We can forget about this inode for now - drop all
	 * the nodelists associated with it, etc.
	 */
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);

	jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
		  __func__, inode->i_ino, inode->i_mode);
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	jffs2_do_clear_inode(c, f);
}

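/*
 * Look up (or construct) the VFS inode for @ino.  iget_locked() returns
 * any cached inode immediately; only for a new (I_NEW) inode do we read
 * the most recent on-flash inode node via jffs2_do_read_inode() and set
 * up the mode-specific operations below.
 */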
struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
{
	struct jffs2_inode_info *f;
	struct jffs2_sb_info *c;
	struct jffs2_raw_inode latest_node;
	union jffs2_device_node jdev;
	struct inode *inode;
	dev_t rdev = 0;
	int ret;

	jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	f = JFFS2_INODE_INFO(inode);
	c = JFFS2_SB_INFO(inode->i_sb);

	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
	if (ret)
		goto error;

	inode->i_mode = jemode_to_cpu(latest_node.mode);
	i_uid_write(inode, je16_to_cpu(latest_node.uid));
	i_gid_write(inode, je16_to_cpu(latest_node.gid));
	inode->i_size = je32_to_cpu(latest_node.isize);
	inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
	inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
	inode_set_ctime_to_ts(inode, ITIME(je32_to_cpu(latest_node.ctime)));

	set_nlink(inode, f->inocache->pino_nlink);

	inode->i_blocks = (inode->i_size + 511) >> 9;

	switch (inode->i_mode & S_IFMT) {

	case S_IFLNK:
		inode->i_op = &jffs2_symlink_inode_operations;
		inode->i_link = f->target;
		break;

	case S_IFDIR:
	{
		struct jffs2_full_dirent *fd;
		set_nlink(inode, 2); /* parent and '.' */

		for (fd=f->dents; fd; fd = fd->next) {
			if (fd->type == DT_DIR && fd->ino)
				inc_nlink(inode);
		}
		/* Root dir gets i_nlink 3 for some reason */
		if (inode->i_ino == 1)
			inc_nlink(inode);

		inode->i_op = &jffs2_dir_inode_operations;
		inode->i_fop = &jffs2_dir_operations;
		break;
	}
	case S_IFREG:
		inode->i_op = &jffs2_file_inode_operations;
		inode->i_fop = &jffs2_file_operations;
		inode->i_mapping->a_ops = &jffs2_file_address_operations;
		inode->i_mapping->nrpages = 0;
		break;

	case S_IFBLK:
	case S_IFCHR:
		/* Read the device numbers from the media */
		if (f->metadata->size != sizeof(jdev.old_id) &&
		    f->metadata->size != sizeof(jdev.new_id)) {
			pr_notice("Device node has strange size %d\n",
				  f->metadata->size);
			goto error_io;
		}
		jffs2_dbg(1, "Reading device numbers from flash\n");
		ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
		if (ret < 0) {
			/* Eep */
			pr_notice("Read device numbers for inode %lu failed\n",
				  (unsigned long)inode->i_ino);
			goto error;
		}
		if (f->metadata->size == sizeof(jdev.old_id))
			rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
		else
			rdev = new_decode_dev(je32_to_cpu(jdev.new_id));
		fallthrough;

	case S_IFSOCK:
	case S_IFIFO:
		inode->i_op = &jffs2_file_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;

	default:
		pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
			__func__, inode->i_mode, (unsigned long)inode->i_ino);
	}

	mutex_unlock(&f->sem);

	jffs2_dbg(1, "jffs2_read_inode() returning\n");
	unlock_new_inode(inode);
	return inode;

error_io:
	ret = -EIO;
error:
	mutex_unlock(&f->sem);
	iget_failed(inode);
	return ERR_PTR(ret);
}

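/*
 * ->dirty_inode: when the inode is marked I_DIRTY_DATASYNC, the current
 * mode, ownership and timestamps are packed into an iattr and written
 * out through jffs2_do_setattr(); otherwise nothing needs to reach flash.
 */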
void jffs2_dirty_inode(struct inode *inode, int flags)
{
	struct iattr iattr;

	if (!(inode->i_state & I_DIRTY_DATASYNC)) {
		jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
			  __func__, inode->i_ino);
		return;
	}

	jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
		  __func__, inode->i_ino);

	iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
	iattr.ia_mode = inode->i_mode;
	iattr.ia_uid = inode->i_uid;
	iattr.ia_gid = inode->i_gid;
	iattr.ia_atime = inode->i_atime;
	iattr.ia_mtime = inode->i_mtime;
	iattr.ia_ctime = inode_get_ctime(inode);

	jffs2_do_setattr(inode, &iattr);
}

int jffs2_do_remount_fs(struct super_block *sb, struct fs_context *fc)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	if (c->flags & JFFS2_SB_FLAG_RO && !sb_rdonly(sb))
		return -EROFS;

	/* We stop if it was running, then restart if it needs to.
	   This also catches the case where it was stopped and this
	   is just a remount to restart it.
	   Flush the writebuffer, if necessary, else we lose it */
	if (!sb_rdonly(sb)) {
		jffs2_stop_garbage_collect_thread(c);
		mutex_lock(&c->alloc_sem);
		jffs2_flush_wbuf_pad(c);
		mutex_unlock(&c->alloc_sem);
	}

	if (!(fc->sb_flags & SB_RDONLY))
		jffs2_start_garbage_collect_thread(c);

	fc->sb_flags |= SB_NOATIME;
	return 0;
}

/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
   fill in the raw_inode while you're at it. */
struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
{
	struct inode *inode;
	struct super_block *sb = dir_i->i_sb;
	struct jffs2_sb_info *c;
	struct jffs2_inode_info *f;
	int ret;

	jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
		  __func__, dir_i->i_ino, mode);

	c = JFFS2_SB_INFO(sb);

	inode = new_inode(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	f = JFFS2_INODE_INFO(inode);
	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	memset(ri, 0, sizeof(*ri));
	/* Set OS-specific defaults for new inodes */
	ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid()));

	if (dir_i->i_mode & S_ISGID) {
		ri->gid = cpu_to_je16(i_gid_read(dir_i));
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid()));
	}

	/* POSIX ACLs have to be processed now, at least partly.
	   The umask is only applied if there's no default ACL */
	ret = jffs2_init_acl_pre(dir_i, inode, &mode);
	if (ret) {
		mutex_unlock(&f->sem);
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	ret = jffs2_do_new_inode (c, f, mode, ri);
	if (ret) {
		mutex_unlock(&f->sem);
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	set_nlink(inode, 1);
	inode->i_ino = je32_to_cpu(ri->ino);
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_gid_write(inode, je16_to_cpu(ri->gid));
	i_uid_write(inode, je16_to_cpu(ri->uid));
	inode->i_atime = inode->i_mtime = inode_set_ctime_current(inode);
	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));

	inode->i_blocks = 0;
	inode->i_size = 0;

	if (insert_inode_locked(inode) < 0) {
		mutex_unlock(&f->sem);
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(-EINVAL);
	}

	return inode;
}

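/*
 * Worked example: a 64MiB medium gives size_mb = 64, and
 * (64 * 2) & ~0x3f == 128 hash buckets, subject to the
 * INOCACHE_HASHSIZE_MIN/MAX clamping below.
 */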
static int calculate_inocache_hashsize(uint32_t flash_size)
{
	/*
	 * Pick an inocache hash size based on the size of the medium.
	 * Count how many megabytes we're dealing with, take twice that
	 * as the hash size, round it down to a multiple of 64, and keep
	 * it within sensible bounds.
	 */

	int size_mb = flash_size / 1024 / 1024;
	int hashsize = (size_mb * 2) & ~0x3f;

	if (hashsize < INOCACHE_HASHSIZE_MIN)
		return INOCACHE_HASHSIZE_MIN;
	if (hashsize > INOCACHE_HASHSIZE_MAX)
		return INOCACHE_HASHSIZE_MAX;

	return hashsize;
}

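/*
 * Fill in the superblock from the MTD device: validate the flash type
 * and geometry, set up any write-buffer support via jffs2_flash_setup(),
 * allocate the inode cache hash, scan/mount the medium with
 * jffs2_do_mount_fs(), and finally instantiate the root dentry from
 * inode #1.
 */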
int jffs2_do_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct jffs2_sb_info *c;
	struct inode *root_i;
	int ret;
	size_t blocks;

	c = JFFS2_SB_INFO(sb);

	/* MLC NAND is not supported */
	if (c->mtd->type == MTD_MLCNANDFLASH)
		return -EINVAL;

#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
	if (c->mtd->type == MTD_NANDFLASH) {
		errorf(fc, "Cannot operate on NAND flash unless jffs2 NAND support is compiled in");
		return -EINVAL;
	}
	if (c->mtd->type == MTD_DATAFLASH) {
		errorf(fc, "Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in");
		return -EINVAL;
	}
#endif

	c->flash_size = c->mtd->size;
	c->sector_size = c->mtd->erasesize;
	blocks = c->flash_size / c->sector_size;

	/*
	 * Size alignment check
	 */
	if ((c->sector_size * blocks) != c->flash_size) {
		c->flash_size = c->sector_size * blocks;
		infof(fc, "Flash size not aligned to erasesize, reducing to %dKiB",
		      c->flash_size / 1024);
	}

	if (c->flash_size < 5*c->sector_size) {
		errorf(fc, "Too few erase blocks (%d)",
		       c->flash_size / c->sector_size);
		return -EINVAL;
	}

	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);

	/* NAND (or other bizarre) flash... do setup accordingly */
	ret = jffs2_flash_setup(c);
	if (ret)
		return ret;

	c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
	c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
	if (!c->inocache_list) {
		ret = -ENOMEM;
		goto out_wbuf;
	}

	jffs2_init_xattr_subsystem(c);

	if ((ret = jffs2_do_mount_fs(c)))
		goto out_inohash;

	jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
	root_i = jffs2_iget(sb, 1);
	if (IS_ERR(root_i)) {
		jffs2_dbg(1, "get root inode failed\n");
		ret = PTR_ERR(root_i);
		goto out_root;
	}

	ret = -ENOMEM;

	jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
	sb->s_root = d_make_root(root_i);
	if (!sb->s_root)
		goto out_root;

	sb->s_maxbytes = 0xFFFFFFFF;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = JFFS2_SUPER_MAGIC;
	sb->s_time_min = 0;
	sb->s_time_max = U32_MAX;

	if (!sb_rdonly(sb))
		jffs2_start_garbage_collect_thread(c);
	return 0;

out_root:
	jffs2_free_ino_caches(c);
	jffs2_free_raw_node_refs(c);
	kvfree(c->blocks);
	jffs2_clear_xattr_subsystem(c);
	jffs2_sum_exit(c);
 out_inohash:
	kfree(c->inocache_list);
 out_wbuf:
	jffs2_flash_cleanup(c);

	return ret;
}

void jffs2_gc_release_inode(struct jffs2_sb_info *c,
			    struct jffs2_inode_info *f)
{
	iput(OFNI_EDONI_2SFFJ(f));
}

struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
					      int inum, int unlinked)
{
	struct inode *inode;
	struct jffs2_inode_cache *ic;

	if (unlinked) {
		/* The inode has zero nlink but its nodes weren't yet marked
		   obsolete. This has to be because we're still waiting for
		   the final (close() and) iput() to happen.

		   There's a possibility that the final iput() could have
		   happened while we were contemplating. In order to ensure
		   that we don't cause a new read_inode() (which would fail)
		   for the inode in question, we use ilookup() in this case
		   instead of iget().

		   The nlink can't _become_ zero at this point because we're
		   holding the alloc_sem, and jffs2_do_unlink() would also
		   need that while decrementing nlink on any inode.
		*/
		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
		if (!inode) {
			jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
				  inum);

			spin_lock(&c->inocache_lock);
			ic = jffs2_get_ino_cache(c, inum);
			if (!ic) {
				jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
					  inum);
				spin_unlock(&c->inocache_lock);
				return NULL;
			}
			if (ic->state != INO_STATE_CHECKEDABSENT) {
				/* Wait for progress. Don't just loop */
				jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
					  ic->ino, ic->state);
				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			} else {
				spin_unlock(&c->inocache_lock);
			}

			return NULL;
		}
	} else {
		/* Inode has links to it still; they're not going away because
		   jffs2_do_unlink() would need the alloc_sem and we have it.
		   Just iget() it, and if read_inode() is necessary that's OK.
		*/
		inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}
	if (is_bad_inode(inode)) {
		pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
			  inum, unlinked);
		/* NB. This will happen again. We need to do something appropriate here. */
		iput(inode);
		return ERR_PTR(-EIO);
	}

	return JFFS2_INODE_INFO(inode);
}

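/*
 * Set up the write-buffer machinery appropriate to the underlying flash
 * (NAND with OOB cleanmarkers, DataFlash, Intel "Sibley" NOR, or a UBI
 * volume).  jffs2_flash_cleanup() below undoes whichever of these was
 * initialised.
 */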
static int jffs2_flash_setup(struct jffs2_sb_info *c) {
	int ret = 0;

	if (jffs2_cleanmarker_oob(c)) {
		/* NAND flash... do setup accordingly */
		ret = jffs2_nand_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and DataFlash */
	if (jffs2_dataflash(c)) {
		ret = jffs2_dataflash_setup(c);
		if (ret)
			return ret;
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		ret = jffs2_nor_wbuf_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		ret = jffs2_ubivol_setup(c);
		if (ret)
			return ret;
	}

	return ret;
}

void jffs2_flash_cleanup(struct jffs2_sb_info *c) {

	if (jffs2_cleanmarker_oob(c)) {
		jffs2_nand_flash_cleanup(c);
	}

	/* and DataFlash */
	if (jffs2_dataflash(c)) {
		jffs2_dataflash_cleanup(c);
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		jffs2_nor_wbuf_flash_cleanup(c);
	}

	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		jffs2_ubivol_cleanup(c);
	}
}