xref: /openbmc/linux/fs/jffs2/wbuf.c (revision 5a528957)
1 /*
2  * JFFS2 -- Journalling Flash File System, Version 2.
3  *
4  * Copyright © 2001-2007 Red Hat, Inc.
5  * Copyright © 2004 Thomas Gleixner <tglx@linutronix.de>
6  *
7  * Created by David Woodhouse <dwmw2@infradead.org>
8  * Modified debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
9  *
10  * For licensing information, see the file 'LICENCE' in this directory.
11  *
12  */
13 
14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/crc32.h>
20 #include <linux/mtd/nand.h>
21 #include <linux/jiffies.h>
22 #include <linux/sched.h>
23 
24 #include "nodelist.h"
25 
26 /* For testing write failures */
27 #undef BREAKME
28 #undef BREAKMEHEADER
29 
30 #ifdef BREAKME
31 static unsigned char *brokenbuf;
32 #endif
33 
/* Round an address down to the start of its write-buffer page */
#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
/* Byte offset of an address within its write-buffer page */
#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
36 
37 /* max. erase failures before we mark a block bad */
38 #define MAX_ERASE_FAILURES 	2
39 
/* Singly-linked list entry recording an inode number which has data
   pending in the write buffer */
struct jffs2_inodirty {
	uint32_t ino;
	struct jffs2_inodirty *next;
};

/* Sentinel stored in c->wbuf_inodes when allocating a list entry
   failed; while present, _every_ inode is treated as pending */
static struct jffs2_inodirty inodirty_nomem;
46 
47 static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
48 {
49 	struct jffs2_inodirty *this = c->wbuf_inodes;
50 
51 	/* If a malloc failed, consider _everything_ dirty */
52 	if (this == &inodirty_nomem)
53 		return 1;
54 
55 	/* If ino == 0, _any_ non-GC writes mean 'yes' */
56 	if (this && !ino)
57 		return 1;
58 
59 	/* Look to see if the inode in question is pending in the wbuf */
60 	while (this) {
61 		if (this->ino == ino)
62 			return 1;
63 		this = this->next;
64 	}
65 	return 0;
66 }
67 
68 static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
69 {
70 	struct jffs2_inodirty *this;
71 
72 	this = c->wbuf_inodes;
73 
74 	if (this != &inodirty_nomem) {
75 		while (this) {
76 			struct jffs2_inodirty *next = this->next;
77 			kfree(this);
78 			this = next;
79 		}
80 	}
81 	c->wbuf_inodes = NULL;
82 }
83 
84 static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
85 {
86 	struct jffs2_inodirty *new;
87 
88 	/* Mark the superblock dirty so that kupdated will flush... */
89 	jffs2_dirty_trigger(c);
90 
91 	if (jffs2_wbuf_pending_for_ino(c, ino))
92 		return;
93 
94 	new = kmalloc(sizeof(*new), GFP_KERNEL);
95 	if (!new) {
96 		jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
97 		jffs2_clear_wbuf_ino_list(c);
98 		c->wbuf_inodes = &inodirty_nomem;
99 		return;
100 	}
101 	new->ino = ino;
102 	new->next = c->wbuf_inodes;
103 	c->wbuf_inodes = new;
104 	return;
105 }
106 
/* Move eraseblocks whose final nodes were sitting in the (now flushed)
 * write buffer from erasable_pending_wbuf_list onto an erase list.
 * Called from __jffs2_flush_wbuf() under c->erase_completion_lock. */
static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
{
	struct list_head *this, *next;
	static int n;	/* perturbs the jiffies-based choice below across calls */

	if (list_empty(&c->erasable_pending_wbuf_list))
		return;

	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);

		jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
			  jeb->offset);
		list_del(this);
		/* 127 times out of 128, pseudo-randomly, erase straight away */
		if ((jiffies + (n++)) & 127) {
			/* Most of the time, we just erase it immediately. Otherwise we
			   spend ages scanning it on mount, etc. */
			jffs2_dbg(1, "...and adding to erase_pending_list\n");
			list_add_tail(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
		} else {
			/* Sometimes, however, we leave it elsewhere so it doesn't get
			   immediately reused, and we spread the load a bit. */
			jffs2_dbg(1, "...and adding to erasable_list\n");
			list_add_tail(&jeb->list, &c->erasable_list);
		}
	}
}
136 
137 #define REFILE_NOTEMPTY 0
138 #define REFILE_ANYWAY   1
139 
/*
 * Refile an eraseblock on which a write failed, so nothing further is
 * written to it. The block goes to bad_used_list if it holds nodes,
 * or (when @allow_empty == REFILE_ANYWAY) to erase_pending_list if it
 * is empty. Any remaining free space is marked with an obsolete ref
 * and accounted as wasted. Called with c->erase_completion_lock held.
 */
static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
	jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);

	/* File the existing block on the bad_used_list.... */
	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else /* Not sure this should ever happen... need more coffee */
		list_del(&jeb->list);
	if (jeb->first_node) {
		jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		BUG_ON(allow_empty == REFILE_NOTEMPTY);
		/* It has to have had some nodes or we couldn't be here */
		jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	/* Cover the block's unused tail with an obsolete ref so it is
	   never written again; on prealloc failure we silently skip the
	   accounting (best effort) */
	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
		uint32_t oldfree = jeb->free_size;

		jffs2_link_node_ref(c, jeb,
				    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
				    oldfree, NULL);
		/* convert to wasted */
		c->wasted_size += oldfree;
		jeb->wasted_size += oldfree;
		c->dirty_size -= oldfree;
		jeb->dirty_size -= oldfree;
	}

	jffs2_dbg_dump_block_lists_nolock(c);
	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}
180 
/*
 * Locate the in-core structure (f->metadata, a full_dnode in the frag
 * tree, or a full_dirent) whose ->raw pointer refers to @raw, so the
 * caller can repoint it at the recovered copy of the node. @node is
 * the node image being moved, used to determine the node type and, for
 * data nodes, the file offset. Returns the address of the ->raw field
 * to adjust, or NULL for node types with no tracked in-core pointer.
 */
static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
							    struct jffs2_inode_info *f,
							    struct jffs2_raw_node_ref *raw,
							    union jffs2_node_union *node)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dirent *fd;

	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));

	/* Node image must carry the JFFS2 magic, or be all-zero */
	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
	       je16_to_cpu(node->u.magic) != 0);

	switch (je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE:
		if (f->metadata && f->metadata->raw == raw) {
			dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
			return &f->metadata->raw;
		}
		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
		BUG_ON(!frag);
		/* Find a frag which refers to the full_dnode we want to modify */
		while (!frag->node || frag->node->raw != raw) {
			frag = frag_next(frag);
			BUG_ON(!frag);
		}
		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
		return &frag->node->raw;

	case JFFS2_NODETYPE_DIRENT:
		/* Linear scan of the directory's dirent list */
		for (fd = f->dents; fd; fd = fd->next) {
			if (fd->raw == raw) {
				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
				return &fd->raw;
			}
		}
		BUG();

	default:
		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
			    je16_to_cpu(node->u.nodetype));
		break;
	}
	return NULL;
}
227 
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
/*
 * Read back the wbuf-pagesize region just written at @ofs and compare
 * it with @buf. Returns 0 on a verified match; on read failure the
 * read's error code, or -EIO on short read / data mismatch.
 *
 * Fix: the diagnostics previously printed c->wbuf_ofs and dumped
 * c->wbuf. That is wrong when called from jffs2_wbuf_recover(), where
 * the verify target @ofs is in a freshly allocated block (!= wbuf_ofs)
 * and the data source may be rewrite_buf rather than c->wbuf. Report
 * @ofs and dump @buf — the values actually involved in the compare.
 */
static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
			      uint32_t ofs)
{
	int ret;
	size_t retlen;
	char *eccstr;

	ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
	if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
		pr_warn("%s(): Read back of page at %08x failed: %d\n",
			__func__, ofs, ret);
		return ret;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n",
			__func__, ofs, retlen, c->wbuf_pagesize);
		return -EIO;
	}
	if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
		return 0;

	/* Mismatch: classify what the ECC layer thought of the read-back */
	if (ret == -EUCLEAN)
		eccstr = "corrected";
	else if (ret == -EBADMSG)
		eccstr = "correction failed";
	else
		eccstr = "OK or unused";

	pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n",
		eccstr, ofs);
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       buf, c->wbuf_pagesize, 0);

	pr_warn("Read back:\n");
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf_verify, c->wbuf_pagesize, 0);

	return -EIO;
}
#else
#define jffs2_verify_write(c,b,o) (0)
#endif
270 
/* Recover from failure to write wbuf. Recover the nodes up to the
 * wbuf, not the one which we were starting to try to write.
 *
 * Overall procedure: refile the failing block, work out which nodes
 * were lost or at risk, re-read any data already on flash, reserve
 * space in a fresh block, rewrite the data there, and finally move
 * the raw_node_refs (and any in-core pointers to them) across to the
 * new block. On unrecoverable errors the data is simply lost. */

static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *jeb, *new_jeb;
	struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
	size_t retlen;
	int ret;
	int nr_refile = 0;
	unsigned char *buf;
	uint32_t start, end, ofs, len;

	/* The eraseblock that contained the failing wbuf */
	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

	/* Refile it so nothing further is written there. If the wbuf sat
	   at the very start of an eraseblock, the block may legitimately
	   hold no nodes yet, hence REFILE_ANYWAY. */
	spin_lock(&c->erase_completion_lock);
	if (c->wbuf_ofs % c->mtd->erasesize)
		jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
	else
		jffs2_block_refile(c, jeb, REFILE_ANYWAY);
	spin_unlock(&c->erase_completion_lock);

	/* The refile marked the block's remaining space with an obsolete
	   ref, so its last node must now be obsolete */
	BUG_ON(!ref_obsolete(jeb->last_node));

	/* Find the first node to be recovered, by skipping over every
	   node which ends before the wbuf starts, or which is obsolete. */
	for (next = raw = jeb->first_node; next; raw = next) {
		next = ref_next(raw);

		if (ref_obsolete(raw) ||
		    (next && ref_offset(next) <= c->wbuf_ofs)) {
			dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
				    ref_offset(raw), ref_flags(raw),
				    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
				    c->wbuf_ofs);
			continue;
		}
		dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
			    ref_offset(raw), ref_flags(raw),
			    (ref_offset(raw) + ref_totlen(c, jeb, raw)));

		first_raw = raw;
		break;
	}

	if (!first_raw) {
		/* All nodes were obsolete. Nothing to recover. */
		jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
		c->wbuf_len = 0;
		return;
	}

	/* Recovery range: from the first affected node up to the start of
	   the obsolete ref covering the block's free space */
	start = ref_offset(first_raw);
	end = ref_offset(jeb->last_node);
	nr_refile = 1;

	/* Count the number of refs which need to be copied */
	while ((raw = ref_next(raw)) != jeb->last_node)
		nr_refile++;

	dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
		    start, end, end - start, nr_refile);

	buf = NULL;
	if (start < c->wbuf_ofs) {
		/* First affected node was already partially written.
		 * Attempt to reread the old data into our buffer. */

		buf = kmalloc(end - start, GFP_KERNEL);
		if (!buf) {
			pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n");

			goto read_failed;
		}

		/* Do the read... */
		ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen,
			       buf);

		/* ECC recovered ? */
		if ((ret == -EUCLEAN || ret == -EBADMSG) &&
		    (retlen == c->wbuf_ofs - start))
			ret = 0;

		if (ret || retlen != c->wbuf_ofs - start) {
			pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n");

			kfree(buf);
			buf = NULL;
		read_failed:
			/* Abandon the partially-written first node, then skip
			   over any immediately following obsolete ones */
			first_raw = ref_next(first_raw);
			nr_refile--;
			while (first_raw && ref_obsolete(first_raw)) {
				first_raw = ref_next(first_raw);
				nr_refile--;
			}

			/* If this was the only node to be recovered, give up */
			if (!first_raw) {
				c->wbuf_len = 0;
				return;
			}

			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
			start = ref_offset(first_raw);
			dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
				    start, end, end - start, nr_refile);

		} else {
			/* Read succeeded. Copy the remaining data from the wbuf */
			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
		}
	}
	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
	   Either 'buf' contains the data, or we find it in the wbuf */

	/* ... and get an allocation of space from a shiny new block instead */
	ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret) {
		pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	/* The summary is not recovered, so it must be disabled for this erase block */
	jffs2_sum_disable_collecting(c->summary);

	ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
	if (ret) {
		pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	ofs = write_ofs(c);

	if (end-start >= c->wbuf_pagesize) {
		/* Need to do another write immediately, but it's possible
		   that this is just because the wbuf itself is completely
		   full, and there's nothing earlier read back from the
		   flash. Hence 'buf' isn't necessarily what we're writing
		   from. */
		unsigned char *rewrite_buf = buf?:c->wbuf;
		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);

		jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
			  towrite, ofs);

#ifdef BREAKMEHEADER
		static int breakme;
		if (breakme++ == 20) {
			pr_notice("Faking write error at 0x%08x\n", ofs);
			breakme = 0;
			mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
			ret = -EIO;
		} else
#endif
			ret = mtd_write(c->mtd, ofs, towrite, &retlen,
					rewrite_buf);

		if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
			/* Argh. We tried. Really we did. */
			pr_crit("Recovery of wbuf failed due to a second write error\n");
			kfree(buf);

			if (retlen)
				jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);

			return;
		}
		pr_notice("Recovery of wbuf succeeded to %08x\n", ofs);

		/* Whatever didn't make a whole page stays in the wbuf */
		c->wbuf_len = (end - start) - towrite;
		c->wbuf_ofs = ofs + towrite;
		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
	} else {
		/* OK, now we're left with the dregs in whichever buffer we're using */
		if (buf) {
			memcpy(c->wbuf, buf, end-start);
		} else {
			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
		}
		c->wbuf_ofs = ofs;
		c->wbuf_len = end - start;
	}

	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
	new_jeb = &c->blocks[ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
		uint32_t rawlen = ref_totlen(c, jeb, raw);
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref *new_ref;
		struct jffs2_raw_node_ref **adjust_ref = NULL;
		struct jffs2_inode_info *f = NULL;

		jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
			  rawlen, ref_offset(raw), ref_flags(raw), ofs);

		ic = jffs2_raw_ref_to_ic(raw);

		/* Ick. This XATTR mess should be fixed shortly... */
		if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
			struct jffs2_xattr_datum *xd = (void *)ic;
			BUG_ON(xd->node != raw);
			adjust_ref = &xd->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
			struct jffs2_xattr_datum *xr = (void *)ic;
			BUG_ON(xr->node != raw);
			adjust_ref = &xr->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
			struct jffs2_raw_node_ref **p = &ic->nodes;

			/* Remove the old node from the per-inode list */
			while (*p && *p != (void *)ic) {
				if (*p == raw) {
					(*p) = (raw->next_in_ino);
					raw->next_in_ino = NULL;
					break;
				}
				p = &((*p)->next_in_ino);
			}

			if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
				/* If it's an in-core inode, then we have to adjust any
				   full_dirent or full_dnode structure to point to the
				   new version instead of the old */
				f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink);
				if (IS_ERR(f)) {
					/* Should never happen; it _must_ be present */
					JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
						    ic->ino, PTR_ERR(f));
					BUG();
				}
				/* We don't lock f->sem. There's a number of ways we could
				   end up in here with it already being locked, and nobody's
				   going to modify it on us anyway because we hold the
				   alloc_sem. We're only changing one ->raw pointer too,
				   which we can get away with without upsetting readers. */
				adjust_ref = jffs2_incore_replace_raw(c, f, raw,
								      (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
			} else if (unlikely(ic->state != INO_STATE_PRESENT &&
					    ic->state != INO_STATE_CHECKEDABSENT &&
					    ic->state != INO_STATE_GC)) {
				JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
				BUG();
			}
		}

		/* Create the replacement ref in the new block... */
		new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);

		/* ...and repoint whatever in-core structure referenced the old one */
		if (adjust_ref) {
			BUG_ON(*adjust_ref != raw);
			*adjust_ref = new_ref;
		}
		if (f)
			jffs2_gc_release_inode(c, f);

		if (!ref_obsolete(raw)) {
			/* The old copy is now dead space; account for it */
			jeb->dirty_size += rawlen;
			jeb->used_size  -= rawlen;
			c->dirty_size += rawlen;
			c->used_size -= rawlen;
			raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
			BUG_ON(raw->next_in_ino);
		}
		ofs += rawlen;
	}

	kfree(buf);

	/* Fix up the original jeb now it's on the bad_list */
	if (first_raw == jeb->first_node) {
		jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
			  jeb->offset);
		list_move(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);

	spin_unlock(&c->erase_completion_lock);

	jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
		  c->wbuf_ofs, c->wbuf_len);

}
569 
570 /* Meaning of pad argument:
571    0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
572    1: Pad, do not adjust nextblock free_size
573    2: Pad, adjust nextblock free_size
574 */
575 #define NOPAD		0
576 #define PAD_NOACCOUNT	1
577 #define PAD_ACCOUNTING	2
578 
/*
 * Write the current contents of the write buffer out to flash,
 * padding it to a full page according to @pad (NOPAD /
 * PAD_NOACCOUNT / PAD_ACCOUNTING, see above). Returns 0 on success;
 * on write failure, jffs2_wbuf_recover() is invoked and the error is
 * returned. Caller must hold c->alloc_sem (checked below) and
 * c->wbuf_sem (taken by all callers in this file).
 */
static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
	struct jffs2_eraseblock *wbuf_jeb;
	int ret;
	size_t retlen;

	/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
	   del_timer() the timer we never initialised. */
	if (!jffs2_is_writebuffered(c))
		return 0;

	if (!mutex_is_locked(&c->alloc_sem)) {
		pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n");
		BUG();
	}

	if (!c->wbuf_len)	/* already checked c->wbuf above */
		return 0;

	/* Pre-allocate the node refs we'll need after the write succeeds,
	   so the post-write accounting below can't fail on allocation */
	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
		return -ENOMEM;

	/* claim remaining space on the page
	   this happens, if we have a change to a new block,
	   or if fsync forces us to flush the writebuffer.
	   if we have a switch to next page, we will not have
	   enough remaining space for this.
	*/
	if (pad ) {
		c->wbuf_len = PAD(c->wbuf_len);

		/* Pad with JFFS2_DIRTY_BITMASK initially.  this helps out ECC'd NOR
		   with 8 byte page size */
		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

		/* If there's room for a node header, write a PADDING node so
		   the scan code can skip this region cleanly */
		if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
		}
	}
	/* else jffs2_flash_writev has actually filled in the rest of the
	   buffer for us, and will deal with the node refs etc. later. */

#ifdef BREAKME
	static int breakme;
	if (breakme++ == 20) {
		pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs);
		breakme = 0;
		mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
			  brokenbuf);
		ret = -EIO;
	} else
#endif

		ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
				&retlen, c->wbuf);

	if (ret) {
		pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret);
		goto wfail;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
			retlen, c->wbuf_pagesize);
		ret = -EIO;
		goto wfail;
	} else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
	wfail:
		/* Write (or verify) failed: salvage the buffered nodes
		   into a fresh block */
		jffs2_wbuf_recover(c);

		return ret;
	}

	/* Adjust free size of the block if we padded. */
	if (pad) {
		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;

		jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
			  (wbuf_jeb == c->nextblock) ? "next" : "",
			  wbuf_jeb->offset);

		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
		   padded. If there is less free space in the block than that,
		   something screwed up */
		if (wbuf_jeb->free_size < waste) {
			pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
				c->wbuf_ofs, c->wbuf_len, waste);
			pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
				wbuf_jeb->offset, wbuf_jeb->free_size);
			BUG();
		}

		spin_lock(&c->erase_completion_lock);

		/* Cover the padded tail with an obsolete ref... */
		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
		/* FIXME: that made it count as dirty. Convert to wasted */
		wbuf_jeb->dirty_size -= waste;
		c->dirty_size -= waste;
		wbuf_jeb->wasted_size += waste;
		c->wasted_size += waste;
	} else
		spin_lock(&c->erase_completion_lock);

	/* Stick any now-obsoleted blocks on the erase_pending_list */
	jffs2_refile_wbuf_blocks(c);
	jffs2_clear_wbuf_ino_list(c);
	spin_unlock(&c->erase_completion_lock);

	/* Reset the wbuf to an erased (0xff) state for the next page */
	memset(c->wbuf,0xff,c->wbuf_pagesize);
	/* adjust write buffer offset, else we get a non contiguous write bug */
	c->wbuf_ofs += c->wbuf_pagesize;
	c->wbuf_len = 0;
	return 0;
}
696 
697 /* Trigger garbage collection to flush the write-buffer.
698    If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
699    outstanding. If ino arg non-zero, do it only if a write for the
700    given inode is outstanding. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);

	/* No write buffer allocated: nothing to flush */
	if (!c->wbuf)
		return 0;

	mutex_lock(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
		mutex_unlock(&c->alloc_sem);
		return 0;
	}

	/* Snapshot the wbuf position; GC progress is detected below by
	   the wbuf offset moving away from this value */
	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while */
		jffs2_dbg(1, "%s(): padding. Not finished checking\n",
			  __func__);
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry flushing wbuf in case jffs2_wbuf_recover
		   left some data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {

		/* Drop alloc_sem around the GC pass */
		mutex_unlock(&c->alloc_sem);

		jffs2_dbg(1, "%s(): calls gc pass\n", __func__);

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			mutex_lock(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry flushing wbuf in case jffs2_wbuf_recover
			   left some data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		mutex_lock(&c->alloc_sem);
	}

	jffs2_dbg(1, "%s(): ends...\n", __func__);

	mutex_unlock(&c->alloc_sem);
	return ret;
}
761 
762 /* Pad write-buffer to end and write it, wasting space. */
763 int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
764 {
765 	int ret;
766 
767 	if (!c->wbuf)
768 		return 0;
769 
770 	down_write(&c->wbuf_sem);
771 	ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
772 	/* retry - maybe wbuf recover left some data in wbuf. */
773 	if (ret)
774 		ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
775 	up_write(&c->wbuf_sem);
776 
777 	return ret;
778 }
779 
780 static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
781 			      size_t len)
782 {
783 	if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
784 		return 0;
785 
786 	if (len > (c->wbuf_pagesize - c->wbuf_len))
787 		len = c->wbuf_pagesize - c->wbuf_len;
788 	memcpy(c->wbuf + c->wbuf_len, buf, len);
789 	c->wbuf_len += (uint32_t) len;
790 	return len;
791 }
792 
793 int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
794 		       unsigned long count, loff_t to, size_t *retlen,
795 		       uint32_t ino)
796 {
797 	struct jffs2_eraseblock *jeb;
798 	size_t wbuf_retlen, donelen = 0;
799 	uint32_t outvec_to = to;
800 	int ret, invec;
801 
802 	/* If not writebuffered flash, don't bother */
803 	if (!jffs2_is_writebuffered(c))
804 		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
805 
806 	down_write(&c->wbuf_sem);
807 
808 	/* If wbuf_ofs is not initialized, set it to target address */
809 	if (c->wbuf_ofs == 0xFFFFFFFF) {
810 		c->wbuf_ofs = PAGE_DIV(to);
811 		c->wbuf_len = PAGE_MOD(to);
812 		memset(c->wbuf,0xff,c->wbuf_pagesize);
813 	}
814 
815 	/*
816 	 * Sanity checks on target address.  It's permitted to write
817 	 * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
818 	 * write at the beginning of a new erase block. Anything else,
819 	 * and you die.  New block starts at xxx000c (0-b = block
820 	 * header)
821 	 */
822 	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
823 		/* It's a write to a new block */
824 		if (c->wbuf_len) {
825 			jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
826 				  __func__, (unsigned long)to, c->wbuf_ofs);
827 			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
828 			if (ret)
829 				goto outerr;
830 		}
831 		/* set pointer to new block */
832 		c->wbuf_ofs = PAGE_DIV(to);
833 		c->wbuf_len = PAGE_MOD(to);
834 	}
835 
836 	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
837 		/* We're not writing immediately after the writebuffer. Bad. */
838 		pr_crit("%s(): Non-contiguous write to %08lx\n",
839 			__func__, (unsigned long)to);
840 		if (c->wbuf_len)
841 			pr_crit("wbuf was previously %08x-%08x\n",
842 				c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len);
843 		BUG();
844 	}
845 
846 	/* adjust alignment offset */
847 	if (c->wbuf_len != PAGE_MOD(to)) {
848 		c->wbuf_len = PAGE_MOD(to);
849 		/* take care of alignment to next page */
850 		if (!c->wbuf_len) {
851 			c->wbuf_len = c->wbuf_pagesize;
852 			ret = __jffs2_flush_wbuf(c, NOPAD);
853 			if (ret)
854 				goto outerr;
855 		}
856 	}
857 
858 	for (invec = 0; invec < count; invec++) {
859 		int vlen = invecs[invec].iov_len;
860 		uint8_t *v = invecs[invec].iov_base;
861 
862 		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
863 
864 		if (c->wbuf_len == c->wbuf_pagesize) {
865 			ret = __jffs2_flush_wbuf(c, NOPAD);
866 			if (ret)
867 				goto outerr;
868 		}
869 		vlen -= wbuf_retlen;
870 		outvec_to += wbuf_retlen;
871 		donelen += wbuf_retlen;
872 		v += wbuf_retlen;
873 
874 		if (vlen >= c->wbuf_pagesize) {
875 			ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen),
876 					&wbuf_retlen, v);
877 			if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
878 				goto outfile;
879 
880 			vlen -= wbuf_retlen;
881 			outvec_to += wbuf_retlen;
882 			c->wbuf_ofs = outvec_to;
883 			donelen += wbuf_retlen;
884 			v += wbuf_retlen;
885 		}
886 
887 		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
888 		if (c->wbuf_len == c->wbuf_pagesize) {
889 			ret = __jffs2_flush_wbuf(c, NOPAD);
890 			if (ret)
891 				goto outerr;
892 		}
893 
894 		outvec_to += wbuf_retlen;
895 		donelen += wbuf_retlen;
896 	}
897 
898 	/*
899 	 * If there's a remainder in the wbuf and it's a non-GC write,
900 	 * remember that the wbuf affects this ino
901 	 */
902 	*retlen = donelen;
903 
904 	if (jffs2_sum_active()) {
905 		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
906 		if (res)
907 			return res;
908 	}
909 
910 	if (c->wbuf_len && ino)
911 		jffs2_wbuf_dirties_inode(c, ino);
912 
913 	ret = 0;
914 	up_write(&c->wbuf_sem);
915 	return ret;
916 
917 outfile:
918 	/*
919 	 * At this point we have no problem, c->wbuf is empty. However
920 	 * refile nextblock to avoid writing again to same address.
921 	 */
922 
923 	spin_lock(&c->erase_completion_lock);
924 
925 	jeb = &c->blocks[outvec_to / c->sector_size];
926 	jffs2_block_refile(c, jeb, REFILE_ANYWAY);
927 
928 	spin_unlock(&c->erase_completion_lock);
929 
930 outerr:
931 	*retlen = 0;
932 	up_write(&c->wbuf_sem);
933 	return ret;
934 }
935 
936 /*
937  *	This is the entry for flash write.
938  *	Check, if we work on NAND FLASH, if so build an kvec and write it via vritev
939 */
940 int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
941 		      size_t *retlen, const u_char *buf)
942 {
943 	struct kvec vecs[1];
944 
945 	if (!jffs2_is_writebuffered(c))
946 		return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
947 
948 	vecs[0].iov_base = (unsigned char *) buf;
949 	vecs[0].iov_len = len;
950 	return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
951 }
952 
/*
 * Read @len bytes at @ofs, overlaying any bytes still pending in the
 * write buffer, and translating MTD ECC failure returns into success
 * (subsequent node CRC checks will catch genuinely corrupt data).
 */
int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
{
	loff_t	orbf = 0, owbf = 0, lwbf = 0;
	int	ret;

	if (!jffs2_is_writebuffered(c))
		return mtd_read(c->mtd, ofs, len, retlen, buf);

	/* Read flash */
	down_read(&c->wbuf_sem);
	ret = mtd_read(c->mtd, ofs, len, retlen, buf);

	if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
		if (ret == -EBADMSG)
			pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
				len, ofs);
		/*
		 * We have the raw data without ECC correction in the buffer,
		 * maybe we are lucky and all data or parts are correct. We
		 * check the node.  If data are corrupted node check will sort
		 * it out.  We keep this block, it will fail on write or erase
		 * and the we mark it bad. Or should we do that now? But we
		 * should give him a chance.  Maybe we had a system crash or
		 * power loss before the ecc write or a erase was completed.
		 * So we return success. :)
		 */
		ret = 0;
	}

	/* if no writebuffer available or write buffer empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read in a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;

	/* Work out how the requested range overlaps the buffered bytes */
	if (ofs >= c->wbuf_ofs) {
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* is write beyond write buffer ? */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	/* Overlay the still-buffered bytes on top of what was read */
	if (lwbf > 0)
		memcpy(buf+orbf,c->wbuf+owbf,lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}
1015 
/* Number of flash pages whose OOB area is fetched in one go when
 * scanning an eraseblock for emptiness. */
#define NR_OOB_SCAN_PAGES 4

/* For historical reasons we use only 8 bytes for OOB clean marker */
#define OOB_CM_SIZE 8

/* On-media image of the cleanmarker node kept in the OOB area:
 * magic, node type and a total length of 8 bytes. */
static const struct jffs2_unknown_node oob_cleanmarker =
{
	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen = constant_cpu_to_je32(8)
};
1027 
1028 /*
1029  * Check, if the out of band area is empty. This function knows about the clean
1030  * marker and if it is present in OOB, treats the OOB as empty anyway.
1031  */
1032 int jffs2_check_oob_empty(struct jffs2_sb_info *c,
1033 			  struct jffs2_eraseblock *jeb, int mode)
1034 {
1035 	int i, ret;
1036 	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1037 	struct mtd_oob_ops ops;
1038 
1039 	ops.mode = MTD_OPS_AUTO_OOB;
1040 	ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
1041 	ops.oobbuf = c->oobbuf;
1042 	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1043 	ops.datbuf = NULL;
1044 
1045 	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
1046 	if (ret || ops.oobretlen != ops.ooblen) {
1047 		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1048 		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
1049 		if (!ret)
1050 			ret = -EIO;
1051 		return ret;
1052 	}
1053 
1054 	for(i = 0; i < ops.ooblen; i++) {
1055 		if (mode && i < cmlen)
1056 			/* Yeah, we know about the cleanmarker */
1057 			continue;
1058 
1059 		if (ops.oobbuf[i] != 0xFF) {
1060 			jffs2_dbg(2, "Found %02x at %x in OOB for "
1061 				  "%08x\n", ops.oobbuf[i], i, jeb->offset);
1062 			return 1;
1063 		}
1064 	}
1065 
1066 	return 0;
1067 }
1068 
1069 /*
1070  * Check for a valid cleanmarker.
1071  * Returns: 0 if a valid cleanmarker was found
1072  *	    1 if no cleanmarker was found
1073  *	    negative error code if an error occurred
1074  */
1075 int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
1076 				 struct jffs2_eraseblock *jeb)
1077 {
1078 	struct mtd_oob_ops ops;
1079 	int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1080 
1081 	ops.mode = MTD_OPS_AUTO_OOB;
1082 	ops.ooblen = cmlen;
1083 	ops.oobbuf = c->oobbuf;
1084 	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1085 	ops.datbuf = NULL;
1086 
1087 	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
1088 	if (ret || ops.oobretlen != ops.ooblen) {
1089 		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1090 		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
1091 		if (!ret)
1092 			ret = -EIO;
1093 		return ret;
1094 	}
1095 
1096 	return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
1097 }
1098 
1099 int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
1100 				 struct jffs2_eraseblock *jeb)
1101 {
1102 	int ret;
1103 	struct mtd_oob_ops ops;
1104 	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1105 
1106 	ops.mode = MTD_OPS_AUTO_OOB;
1107 	ops.ooblen = cmlen;
1108 	ops.oobbuf = (uint8_t *)&oob_cleanmarker;
1109 	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1110 	ops.datbuf = NULL;
1111 
1112 	ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
1113 	if (ret || ops.oobretlen != ops.ooblen) {
1114 		pr_err("cannot write OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1115 		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
1116 		if (!ret)
1117 			ret = -EIO;
1118 		return ret;
1119 	}
1120 
1121 	return 0;
1122 }
1123 
1124 /*
1125  * On NAND we try to mark this block bad. If the block was erased more
1126  * than MAX_ERASE_FAILURES we mark it finally bad.
1127  * Don't care about failures. This block remains on the erase-pending
1128  * or badblock list as long as nobody manipulates the flash with
1129  * a bootloader or something like that.
1130  */
1131 
1132 int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
1133 {
1134 	int 	ret;
1135 
1136 	/* if the count is < max, we try to write the counter to the 2nd page oob area */
1137 	if( ++jeb->bad_count < MAX_ERASE_FAILURES)
1138 		return 0;
1139 
1140 	pr_warn("marking eraseblock at %08x as bad\n", bad_offset);
1141 	ret = mtd_block_markbad(c->mtd, bad_offset);
1142 
1143 	if (ret) {
1144 		jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
1145 			  __func__, jeb->offset, ret);
1146 		return ret;
1147 	}
1148 	return 1;
1149 }
1150 
1151 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1152 {
1153 	struct nand_ecclayout *oinfo = c->mtd->ecclayout;
1154 
1155 	if (!c->mtd->oobsize)
1156 		return 0;
1157 
1158 	/* Cleanmarker is out-of-band, so inline size zero */
1159 	c->cleanmarker_size = 0;
1160 
1161 	if (!oinfo || oinfo->oobavail == 0) {
1162 		pr_err("inconsistent device description\n");
1163 		return -EINVAL;
1164 	}
1165 
1166 	jffs2_dbg(1, "using OOB on NAND\n");
1167 
1168 	c->oobavail = oinfo->oobavail;
1169 
1170 	/* Initialise write buffer */
1171 	init_rwsem(&c->wbuf_sem);
1172 	c->wbuf_pagesize = c->mtd->writesize;
1173 	c->wbuf_ofs = 0xFFFFFFFF;
1174 
1175 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1176 	if (!c->wbuf)
1177 		return -ENOMEM;
1178 
1179 	c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->oobavail, GFP_KERNEL);
1180 	if (!c->oobbuf) {
1181 		kfree(c->wbuf);
1182 		return -ENOMEM;
1183 	}
1184 
1185 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1186 	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1187 	if (!c->wbuf_verify) {
1188 		kfree(c->oobbuf);
1189 		kfree(c->wbuf);
1190 		return -ENOMEM;
1191 	}
1192 #endif
1193 	return 0;
1194 }
1195 
1196 void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1197 {
1198 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1199 	kfree(c->wbuf_verify);
1200 #endif
1201 	kfree(c->wbuf);
1202 	kfree(c->oobbuf);
1203 }
1204 
1205 int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1206 	c->cleanmarker_size = 0;		/* No cleanmarkers needed */
1207 
1208 	/* Initialize write buffer */
1209 	init_rwsem(&c->wbuf_sem);
1210 
1211 
1212 	c->wbuf_pagesize =  c->mtd->erasesize;
1213 
1214 	/* Find a suitable c->sector_size
1215 	 * - Not too much sectors
1216 	 * - Sectors have to be at least 4 K + some bytes
1217 	 * - All known dataflashes have erase sizes of 528 or 1056
1218 	 * - we take at least 8 eraseblocks and want to have at least 8K size
1219 	 * - The concatenation should be a power of 2
1220 	*/
1221 
1222 	c->sector_size = 8 * c->mtd->erasesize;
1223 
1224 	while (c->sector_size < 8192) {
1225 		c->sector_size *= 2;
1226 	}
1227 
1228 	/* It may be necessary to adjust the flash size */
1229 	c->flash_size = c->mtd->size;
1230 
1231 	if ((c->flash_size % c->sector_size) != 0) {
1232 		c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
1233 		pr_warn("flash size adjusted to %dKiB\n", c->flash_size);
1234 	};
1235 
1236 	c->wbuf_ofs = 0xFFFFFFFF;
1237 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1238 	if (!c->wbuf)
1239 		return -ENOMEM;
1240 
1241 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1242 	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1243 	if (!c->wbuf_verify) {
1244 		kfree(c->oobbuf);
1245 		kfree(c->wbuf);
1246 		return -ENOMEM;
1247 	}
1248 #endif
1249 
1250 	pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
1251 		c->wbuf_pagesize, c->sector_size);
1252 
1253 	return 0;
1254 }
1255 
/* Free the buffers allocated by jffs2_dataflash_setup(). */
void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	kfree(c->wbuf_verify);
#endif
	kfree(c->wbuf);
}
1262 
/*
 * Set up write-buffering for NOR flash that has a minimum programming
 * unit larger than one byte. Returns 0 on success or -ENOMEM.
 */
int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
	/* Cleanmarker currently occupies whole programming regions,
	 * either one or 2 for 8Byte STMicro flashes. */
	c->cleanmarker_size = max(16u, c->mtd->writesize);

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);
	c->wbuf_pagesize = c->mtd->writesize;
	/* Sentinel: no eraseblock is currently buffered */
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf_verify) {
		kfree(c->wbuf);
		return -ENOMEM;
	}
#endif
	return 0;
}
1286 
/* Free the buffers allocated by jffs2_nor_wbuf_flash_setup(). */
void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	kfree(c->wbuf_verify);
#endif
	kfree(c->wbuf);
}
1293 
/*
 * Set up write-buffering for a JFFS2 image on a UBI volume.
 * A buffer is only needed when the underlying minimum write unit is
 * larger than one byte. Returns 0 on success or -ENOMEM.
 */
int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
	c->cleanmarker_size = 0;

	if (c->mtd->writesize == 1)
		/* We do not need write-buffer */
		return 0;

	init_rwsem(&c->wbuf_sem);

	c->wbuf_pagesize =  c->mtd->writesize;
	/* Sentinel: no eraseblock is currently buffered */
	c->wbuf_ofs = 0xFFFFFFFF;
	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
		c->wbuf_pagesize, c->sector_size);

	return 0;
}
1314 
/* Free the write buffer allocated by jffs2_ubivol_setup(). */
void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}
1318