xref: /openbmc/linux/fs/jffs2/wbuf.c (revision 9c261b33)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * JFFS2 -- Journalling Flash File System, Version 2.
31da177e4SLinus Torvalds  *
4c00c310eSDavid Woodhouse  * Copyright © 2001-2007 Red Hat, Inc.
5c00c310eSDavid Woodhouse  * Copyright © 2004 Thomas Gleixner <tglx@linutronix.de>
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  * Created by David Woodhouse <dwmw2@infradead.org>
81da177e4SLinus Torvalds  * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
91da177e4SLinus Torvalds  *
101da177e4SLinus Torvalds  * For licensing information, see the file 'LICENCE' in this directory.
111da177e4SLinus Torvalds  *
121da177e4SLinus Torvalds  */
131da177e4SLinus Torvalds 
141da177e4SLinus Torvalds #include <linux/kernel.h>
151da177e4SLinus Torvalds #include <linux/slab.h>
161da177e4SLinus Torvalds #include <linux/mtd/mtd.h>
171da177e4SLinus Torvalds #include <linux/crc32.h>
181da177e4SLinus Torvalds #include <linux/mtd/nand.h>
194e57b681STim Schmielau #include <linux/jiffies.h>
20914e2637SAl Viro #include <linux/sched.h>
214e57b681STim Schmielau 
221da177e4SLinus Torvalds #include "nodelist.h"
231da177e4SLinus Torvalds 
241da177e4SLinus Torvalds /* For testing write failures */
251da177e4SLinus Torvalds #undef BREAKME
261da177e4SLinus Torvalds #undef BREAKMEHEADER
271da177e4SLinus Torvalds 
281da177e4SLinus Torvalds #ifdef BREAKME
291da177e4SLinus Torvalds static unsigned char *brokenbuf;
301da177e4SLinus Torvalds #endif
311da177e4SLinus Torvalds 
32daba5cc4SArtem B. Bityutskiy #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
33daba5cc4SArtem B. Bityutskiy #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
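/* For illustration (with a hypothetical wbuf_pagesize of 512 bytes):
   PAGE_DIV(0x1234) == 0x1200 and PAGE_MOD(0x1234) == 0x34, i.e. the start
   of, and the offset within, the write-buffer page containing an address. */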
34daba5cc4SArtem B. Bityutskiy 
351da177e4SLinus Torvalds /* max. erase failures before we mark a block bad */
361da177e4SLinus Torvalds #define MAX_ERASE_FAILURES 	2
371da177e4SLinus Torvalds 
381da177e4SLinus Torvalds struct jffs2_inodirty {
391da177e4SLinus Torvalds 	uint32_t ino;
401da177e4SLinus Torvalds 	struct jffs2_inodirty *next;
411da177e4SLinus Torvalds };
421da177e4SLinus Torvalds 
431da177e4SLinus Torvalds static struct jffs2_inodirty inodirty_nomem;
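/* Sentinel used instead of a real list entry when allocation fails;
   jffs2_wbuf_pending_for_ino() then treats every inode as having data
   pending in the write buffer. */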
441da177e4SLinus Torvalds 
451da177e4SLinus Torvalds static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
461da177e4SLinus Torvalds {
471da177e4SLinus Torvalds 	struct jffs2_inodirty *this = c->wbuf_inodes;
481da177e4SLinus Torvalds 
491da177e4SLinus Torvalds 	/* If a malloc failed, consider _everything_ dirty */
501da177e4SLinus Torvalds 	if (this == &inodirty_nomem)
511da177e4SLinus Torvalds 		return 1;
521da177e4SLinus Torvalds 
531da177e4SLinus Torvalds 	/* If ino == 0, _any_ non-GC writes mean 'yes' */
541da177e4SLinus Torvalds 	if (this && !ino)
551da177e4SLinus Torvalds 		return 1;
561da177e4SLinus Torvalds 
571da177e4SLinus Torvalds 	/* Look to see if the inode in question is pending in the wbuf */
581da177e4SLinus Torvalds 	while (this) {
591da177e4SLinus Torvalds 		if (this->ino == ino)
601da177e4SLinus Torvalds 			return 1;
611da177e4SLinus Torvalds 		this = this->next;
621da177e4SLinus Torvalds 	}
631da177e4SLinus Torvalds 	return 0;
641da177e4SLinus Torvalds }
651da177e4SLinus Torvalds 
661da177e4SLinus Torvalds static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
671da177e4SLinus Torvalds {
681da177e4SLinus Torvalds 	struct jffs2_inodirty *this;
691da177e4SLinus Torvalds 
701da177e4SLinus Torvalds 	this = c->wbuf_inodes;
711da177e4SLinus Torvalds 
721da177e4SLinus Torvalds 	if (this != &inodirty_nomem) {
731da177e4SLinus Torvalds 		while (this) {
741da177e4SLinus Torvalds 			struct jffs2_inodirty *next = this->next;
751da177e4SLinus Torvalds 			kfree(this);
761da177e4SLinus Torvalds 			this = next;
771da177e4SLinus Torvalds 		}
781da177e4SLinus Torvalds 	}
791da177e4SLinus Torvalds 	c->wbuf_inodes = NULL;
801da177e4SLinus Torvalds }
811da177e4SLinus Torvalds 
821da177e4SLinus Torvalds static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
831da177e4SLinus Torvalds {
841da177e4SLinus Torvalds 	struct jffs2_inodirty *new;
851da177e4SLinus Torvalds 
861da177e4SLinus Torvalds 	/* Mark the superblock dirty so that kupdated will flush... */
8764a5c2ebSJoakim Tjernlund 	jffs2_dirty_trigger(c);
881da177e4SLinus Torvalds 
891da177e4SLinus Torvalds 	if (jffs2_wbuf_pending_for_ino(c, ino))
901da177e4SLinus Torvalds 		return;
911da177e4SLinus Torvalds 
921da177e4SLinus Torvalds 	new = kmalloc(sizeof(*new), GFP_KERNEL);
931da177e4SLinus Torvalds 	if (!new) {
949c261b33SJoe Perches 		jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
951da177e4SLinus Torvalds 		jffs2_clear_wbuf_ino_list(c);
961da177e4SLinus Torvalds 		c->wbuf_inodes = &inodirty_nomem;
971da177e4SLinus Torvalds 		return;
981da177e4SLinus Torvalds 	}
991da177e4SLinus Torvalds 	new->ino = ino;
1001da177e4SLinus Torvalds 	new->next = c->wbuf_inodes;
1011da177e4SLinus Torvalds 	c->wbuf_inodes = new;
1021da177e4SLinus Torvalds 	return;
1031da177e4SLinus Torvalds }
1041da177e4SLinus Torvalds 
1051da177e4SLinus Torvalds static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
1061da177e4SLinus Torvalds {
1071da177e4SLinus Torvalds 	struct list_head *this, *next;
1081da177e4SLinus Torvalds 	static int n;
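	/* 'n' merely perturbs the jiffies-based choice below, so that roughly
	   one block in 128 is parked on erasable_list rather than being
	   queued for immediate erase. */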
1091da177e4SLinus Torvalds 
1101da177e4SLinus Torvalds 	if (list_empty(&c->erasable_pending_wbuf_list))
1111da177e4SLinus Torvalds 		return;
1121da177e4SLinus Torvalds 
1131da177e4SLinus Torvalds 	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
1141da177e4SLinus Torvalds 		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
1151da177e4SLinus Torvalds 
1169c261b33SJoe Perches 		jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
1179c261b33SJoe Perches 			  jeb->offset);
1181da177e4SLinus Torvalds 		list_del(this);
1191da177e4SLinus Torvalds 		if ((jiffies + (n++)) & 127) {
1201da177e4SLinus Torvalds 			/* Most of the time, we just erase it immediately. Otherwise we
1211da177e4SLinus Torvalds 			   spend ages scanning it on mount, etc. */
1229c261b33SJoe Perches 			jffs2_dbg(1, "...and adding to erase_pending_list\n");
1231da177e4SLinus Torvalds 			list_add_tail(&jeb->list, &c->erase_pending_list);
1241da177e4SLinus Torvalds 			c->nr_erasing_blocks++;
125ae3b6ba0SDavid Woodhouse 			jffs2_garbage_collect_trigger(c);
1261da177e4SLinus Torvalds 		} else {
1271da177e4SLinus Torvalds 			/* Sometimes, however, we leave it elsewhere so it doesn't get
1281da177e4SLinus Torvalds 			   immediately reused, and we spread the load a bit. */
1299c261b33SJoe Perches 			jffs2_dbg(1, "...and adding to erasable_list\n");
1301da177e4SLinus Torvalds 			list_add_tail(&jeb->list, &c->erasable_list);
1311da177e4SLinus Torvalds 		}
1321da177e4SLinus Torvalds 	}
1331da177e4SLinus Torvalds }
1341da177e4SLinus Torvalds 
1357f716cf3SEstelle Hammache #define REFILE_NOTEMPTY 0
1367f716cf3SEstelle Hammache #define REFILE_ANYWAY   1
1377f716cf3SEstelle Hammache 
1387f716cf3SEstelle Hammache static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
1391da177e4SLinus Torvalds {
1409c261b33SJoe Perches 	jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);
1411da177e4SLinus Torvalds 
1421da177e4SLinus Torvalds 	/* File the existing block on the bad_used_list.... */
1431da177e4SLinus Torvalds 	if (c->nextblock == jeb)
1441da177e4SLinus Torvalds 		c->nextblock = NULL;
1451da177e4SLinus Torvalds 	else /* Not sure this should ever happen... need more coffee */
1461da177e4SLinus Torvalds 		list_del(&jeb->list);
1471da177e4SLinus Torvalds 	if (jeb->first_node) {
1489c261b33SJoe Perches 		jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
1499c261b33SJoe Perches 			  jeb->offset);
1501da177e4SLinus Torvalds 		list_add(&jeb->list, &c->bad_used_list);
1511da177e4SLinus Torvalds 	} else {
1529b88f473SEstelle Hammache 		BUG_ON(allow_empty == REFILE_NOTEMPTY);
1531da177e4SLinus Torvalds 		/* It has to have had some nodes or we couldn't be here */
1549c261b33SJoe Perches 		jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
1559c261b33SJoe Perches 			  jeb->offset);
1561da177e4SLinus Torvalds 		list_add(&jeb->list, &c->erase_pending_list);
1571da177e4SLinus Torvalds 		c->nr_erasing_blocks++;
158ae3b6ba0SDavid Woodhouse 		jffs2_garbage_collect_trigger(c);
1591da177e4SLinus Torvalds 	}
1601da177e4SLinus Torvalds 
1619bfeb691SDavid Woodhouse 	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
1629bfeb691SDavid Woodhouse 		uint32_t oldfree = jeb->free_size;
1639bfeb691SDavid Woodhouse 
1649bfeb691SDavid Woodhouse 		jffs2_link_node_ref(c, jeb,
1659bfeb691SDavid Woodhouse 				    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
1669bfeb691SDavid Woodhouse 				    oldfree, NULL);
1679bfeb691SDavid Woodhouse 		/* convert to wasted */
1689bfeb691SDavid Woodhouse 		c->wasted_size += oldfree;
1699bfeb691SDavid Woodhouse 		jeb->wasted_size += oldfree;
1709bfeb691SDavid Woodhouse 		c->dirty_size -= oldfree;
1719bfeb691SDavid Woodhouse 		jeb->dirty_size -= oldfree;
1729bfeb691SDavid Woodhouse 	}
1731da177e4SLinus Torvalds 
174e0c8e42fSArtem B. Bityutskiy 	jffs2_dbg_dump_block_lists_nolock(c);
175e0c8e42fSArtem B. Bityutskiy 	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
176e0c8e42fSArtem B. Bityutskiy 	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
1771da177e4SLinus Torvalds }
1781da177e4SLinus Torvalds 
1799bfeb691SDavid Woodhouse static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
1809bfeb691SDavid Woodhouse 							    struct jffs2_inode_info *f,
1819bfeb691SDavid Woodhouse 							    struct jffs2_raw_node_ref *raw,
1829bfeb691SDavid Woodhouse 							    union jffs2_node_union *node)
1839bfeb691SDavid Woodhouse {
1849bfeb691SDavid Woodhouse 	struct jffs2_node_frag *frag;
1859bfeb691SDavid Woodhouse 	struct jffs2_full_dirent *fd;
1869bfeb691SDavid Woodhouse 
1879bfeb691SDavid Woodhouse 	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
1889bfeb691SDavid Woodhouse 		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));
1899bfeb691SDavid Woodhouse 
1909bfeb691SDavid Woodhouse 	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
1919bfeb691SDavid Woodhouse 	       je16_to_cpu(node->u.magic) != 0);
1929bfeb691SDavid Woodhouse 
1939bfeb691SDavid Woodhouse 	switch (je16_to_cpu(node->u.nodetype)) {
1949bfeb691SDavid Woodhouse 	case JFFS2_NODETYPE_INODE:
195ddc58bd6SDavid Woodhouse 		if (f->metadata && f->metadata->raw == raw) {
196ddc58bd6SDavid Woodhouse 			dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
197ddc58bd6SDavid Woodhouse 			return &f->metadata->raw;
198ddc58bd6SDavid Woodhouse 		}
1999bfeb691SDavid Woodhouse 		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
2009bfeb691SDavid Woodhouse 		BUG_ON(!frag);
2019bfeb691SDavid Woodhouse 		/* Find a frag which refers to the full_dnode we want to modify */
2029bfeb691SDavid Woodhouse 		while (!frag->node || frag->node->raw != raw) {
2039bfeb691SDavid Woodhouse 			frag = frag_next(frag);
2049bfeb691SDavid Woodhouse 			BUG_ON(!frag);
2059bfeb691SDavid Woodhouse 		}
2069bfeb691SDavid Woodhouse 		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
2079bfeb691SDavid Woodhouse 		return &frag->node->raw;
2089bfeb691SDavid Woodhouse 
2099bfeb691SDavid Woodhouse 	case JFFS2_NODETYPE_DIRENT:
2109bfeb691SDavid Woodhouse 		for (fd = f->dents; fd; fd = fd->next) {
2119bfeb691SDavid Woodhouse 			if (fd->raw == raw) {
2129bfeb691SDavid Woodhouse 				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
2139bfeb691SDavid Woodhouse 				return &fd->raw;
2149bfeb691SDavid Woodhouse 			}
2159bfeb691SDavid Woodhouse 		}
2169bfeb691SDavid Woodhouse 		BUG();
217ddc58bd6SDavid Woodhouse 
2189bfeb691SDavid Woodhouse 	default:
2199bfeb691SDavid Woodhouse 		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
2209bfeb691SDavid Woodhouse 			    je16_to_cpu(node->u.nodetype));
2219bfeb691SDavid Woodhouse 		break;
2229bfeb691SDavid Woodhouse 	}
2239bfeb691SDavid Woodhouse 	return NULL;
2249bfeb691SDavid Woodhouse }
2259bfeb691SDavid Woodhouse 
226a6bc432eSDavid Woodhouse #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
227a6bc432eSDavid Woodhouse static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
228a6bc432eSDavid Woodhouse 			      uint32_t ofs)
229a6bc432eSDavid Woodhouse {
230a6bc432eSDavid Woodhouse 	int ret;
231a6bc432eSDavid Woodhouse 	size_t retlen;
232a6bc432eSDavid Woodhouse 	char *eccstr;
233a6bc432eSDavid Woodhouse 
234329ad399SArtem Bityutskiy 	ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
235a6bc432eSDavid Woodhouse 	if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
236a6bc432eSDavid Woodhouse 		printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x failed: %d\n", c->wbuf_ofs, ret);
237a6bc432eSDavid Woodhouse 		return ret;
238a6bc432eSDavid Woodhouse 	} else if (retlen != c->wbuf_pagesize) {
239a6bc432eSDavid Woodhouse 		printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x gave short read: %zd not %d.\n", ofs, retlen, c->wbuf_pagesize);
240a6bc432eSDavid Woodhouse 		return -EIO;
241a6bc432eSDavid Woodhouse 	}
242a6bc432eSDavid Woodhouse 	if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
243a6bc432eSDavid Woodhouse 		return 0;
244a6bc432eSDavid Woodhouse 
245a6bc432eSDavid Woodhouse 	if (ret == -EUCLEAN)
246a6bc432eSDavid Woodhouse 		eccstr = "corrected";
247a6bc432eSDavid Woodhouse 	else if (ret == -EBADMSG)
248a6bc432eSDavid Woodhouse 		eccstr = "correction failed";
249a6bc432eSDavid Woodhouse 	else
250a6bc432eSDavid Woodhouse 		eccstr = "OK or unused";
251a6bc432eSDavid Woodhouse 
252a6bc432eSDavid Woodhouse 	printk(KERN_WARNING "Write verify error (ECC %s) at %08x. Wrote:\n",
253a6bc432eSDavid Woodhouse 	       eccstr, c->wbuf_ofs);
254a6bc432eSDavid Woodhouse 	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
255a6bc432eSDavid Woodhouse 		       c->wbuf, c->wbuf_pagesize, 0);
256a6bc432eSDavid Woodhouse 
257a6bc432eSDavid Woodhouse 	printk(KERN_WARNING "Read back:\n");
258a6bc432eSDavid Woodhouse 	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
259a6bc432eSDavid Woodhouse 		       c->wbuf_verify, c->wbuf_pagesize, 0);
260a6bc432eSDavid Woodhouse 
261a6bc432eSDavid Woodhouse 	return -EIO;
262a6bc432eSDavid Woodhouse }
263a6bc432eSDavid Woodhouse #else
264a6bc432eSDavid Woodhouse #define jffs2_verify_write(c,b,o) (0)
265a6bc432eSDavid Woodhouse #endif
266a6bc432eSDavid Woodhouse 
2671da177e4SLinus Torvalds /* Recover from failure to write wbuf. Recover the nodes up to the
2681da177e4SLinus Torvalds  * wbuf, not the one which we were starting to try to write. */
2691da177e4SLinus Torvalds 
2701da177e4SLinus Torvalds static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
2711da177e4SLinus Torvalds {
2721da177e4SLinus Torvalds 	struct jffs2_eraseblock *jeb, *new_jeb;
2739bfeb691SDavid Woodhouse 	struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
2741da177e4SLinus Torvalds 	size_t retlen;
2751da177e4SLinus Torvalds 	int ret;
2769bfeb691SDavid Woodhouse 	int nr_refile = 0;
2771da177e4SLinus Torvalds 	unsigned char *buf;
2781da177e4SLinus Torvalds 	uint32_t start, end, ofs, len;
2791da177e4SLinus Torvalds 
280046b8b98SDavid Woodhouse 	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
281046b8b98SDavid Woodhouse 
2821da177e4SLinus Torvalds 	spin_lock(&c->erase_completion_lock);
283180bfb31SVitaly Wool 	if (c->wbuf_ofs % c->mtd->erasesize)
2847f716cf3SEstelle Hammache 		jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
285180bfb31SVitaly Wool 	else
286180bfb31SVitaly Wool 		jffs2_block_refile(c, jeb, REFILE_ANYWAY);
2879bfeb691SDavid Woodhouse 	spin_unlock(&c->erase_completion_lock);
2889bfeb691SDavid Woodhouse 
2899bfeb691SDavid Woodhouse 	BUG_ON(!ref_obsolete(jeb->last_node));
2901da177e4SLinus Torvalds 
2911da177e4SLinus Torvalds 	/* Find the first node to be recovered, by skipping over every
2921da177e4SLinus Torvalds 	   node which ends before the wbuf starts, or which is obsolete. */
2939bfeb691SDavid Woodhouse 	for (next = raw = jeb->first_node; next; raw = next) {
2949bfeb691SDavid Woodhouse 		next = ref_next(raw);
2959bfeb691SDavid Woodhouse 
2969bfeb691SDavid Woodhouse 		if (ref_obsolete(raw) ||
2979bfeb691SDavid Woodhouse 		    (next && ref_offset(next) <= c->wbuf_ofs)) {
2989bfeb691SDavid Woodhouse 			dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
2999bfeb691SDavid Woodhouse 				    ref_offset(raw), ref_flags(raw),
3009bfeb691SDavid Woodhouse 				    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
3019bfeb691SDavid Woodhouse 				    c->wbuf_ofs);
3029bfeb691SDavid Woodhouse 			continue;
3039bfeb691SDavid Woodhouse 		}
3049bfeb691SDavid Woodhouse 		dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
3059bfeb691SDavid Woodhouse 			    ref_offset(raw), ref_flags(raw),
3069bfeb691SDavid Woodhouse 			    (ref_offset(raw) + ref_totlen(c, jeb, raw)));
3079bfeb691SDavid Woodhouse 
3089bfeb691SDavid Woodhouse 		first_raw = raw;
3099bfeb691SDavid Woodhouse 		break;
3101da177e4SLinus Torvalds 	}
3111da177e4SLinus Torvalds 
3129bfeb691SDavid Woodhouse 	if (!first_raw) {
3131da177e4SLinus Torvalds 		/* All nodes were obsolete. Nothing to recover. */
3149c261b33SJoe Perches 		jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
3159bfeb691SDavid Woodhouse 		c->wbuf_len = 0;
3161da177e4SLinus Torvalds 		return;
3171da177e4SLinus Torvalds 	}
3181da177e4SLinus Torvalds 
3199bfeb691SDavid Woodhouse 	start = ref_offset(first_raw);
3209bfeb691SDavid Woodhouse 	end = ref_offset(jeb->last_node);
3219bfeb691SDavid Woodhouse 	nr_refile = 1;
3221da177e4SLinus Torvalds 
3239bfeb691SDavid Woodhouse 	/* Count the number of refs which need to be copied */
3249bfeb691SDavid Woodhouse 	while ((raw = ref_next(raw)) != jeb->last_node)
3259bfeb691SDavid Woodhouse 		nr_refile++;
3261da177e4SLinus Torvalds 
3279bfeb691SDavid Woodhouse 	dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
3289bfeb691SDavid Woodhouse 		    start, end, end - start, nr_refile);
3291da177e4SLinus Torvalds 
3301da177e4SLinus Torvalds 	buf = NULL;
3311da177e4SLinus Torvalds 	if (start < c->wbuf_ofs) {
3321da177e4SLinus Torvalds 		/* First affected node was already partially written.
3331da177e4SLinus Torvalds 		 * Attempt to reread the old data into our buffer. */
3341da177e4SLinus Torvalds 
3351da177e4SLinus Torvalds 		buf = kmalloc(end - start, GFP_KERNEL);
3361da177e4SLinus Torvalds 		if (!buf) {
3371da177e4SLinus Torvalds 			printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");
3381da177e4SLinus Torvalds 
3391da177e4SLinus Torvalds 			goto read_failed;
3401da177e4SLinus Torvalds 		}
3411da177e4SLinus Torvalds 
3421da177e4SLinus Torvalds 		/* Do the read... */
343329ad399SArtem Bityutskiy 		ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen,
344329ad399SArtem Bityutskiy 			       buf);
3451da177e4SLinus Torvalds 
3469a1fcdfdSThomas Gleixner 		/* ECC recovered ? */
3479a1fcdfdSThomas Gleixner 		if ((ret == -EUCLEAN || ret == -EBADMSG) &&
3489a1fcdfdSThomas Gleixner 		    (retlen == c->wbuf_ofs - start))
3491da177e4SLinus Torvalds 			ret = 0;
3509a1fcdfdSThomas Gleixner 
3511da177e4SLinus Torvalds 		if (ret || retlen != c->wbuf_ofs - start) {
3521da177e4SLinus Torvalds 			printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");
3531da177e4SLinus Torvalds 
3541da177e4SLinus Torvalds 			kfree(buf);
3551da177e4SLinus Torvalds 			buf = NULL;
3561da177e4SLinus Torvalds 		read_failed:
3579bfeb691SDavid Woodhouse 			first_raw = ref_next(first_raw);
3589bfeb691SDavid Woodhouse 			nr_refile--;
3599bfeb691SDavid Woodhouse 			while (first_raw && ref_obsolete(first_raw)) {
3609bfeb691SDavid Woodhouse 				first_raw = ref_next(first_raw);
3619bfeb691SDavid Woodhouse 				nr_refile--;
3629bfeb691SDavid Woodhouse 			}
3639bfeb691SDavid Woodhouse 
3641da177e4SLinus Torvalds 			/* If this was the only node to be recovered, give up */
3659bfeb691SDavid Woodhouse 			if (!first_raw) {
3669bfeb691SDavid Woodhouse 				c->wbuf_len = 0;
3671da177e4SLinus Torvalds 				return;
3689bfeb691SDavid Woodhouse 			}
3691da177e4SLinus Torvalds 
3701da177e4SLinus Torvalds 			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
3719bfeb691SDavid Woodhouse 			start = ref_offset(first_raw);
3729bfeb691SDavid Woodhouse 			dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
3739bfeb691SDavid Woodhouse 				    start, end, end - start, nr_refile);
3749bfeb691SDavid Woodhouse 
3751da177e4SLinus Torvalds 		} else {
3761da177e4SLinus Torvalds 			/* Read succeeded. Copy the remaining data from the wbuf */
3771da177e4SLinus Torvalds 			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
3781da177e4SLinus Torvalds 		}
3791da177e4SLinus Torvalds 	}
3801da177e4SLinus Torvalds 	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
3811da177e4SLinus Torvalds 	   Either 'buf' contains the data, or we find it in the wbuf */
3821da177e4SLinus Torvalds 
3831da177e4SLinus Torvalds 	/* ... and get an allocation of space from a shiny new block instead */
3849fe4854cSDavid Woodhouse 	ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
3851da177e4SLinus Torvalds 	if (ret) {
3861da177e4SLinus Torvalds 		printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
3871da177e4SLinus Torvalds 		kfree(buf);
3881da177e4SLinus Torvalds 		return;
3891da177e4SLinus Torvalds 	}
3909bfeb691SDavid Woodhouse 
3917f762ab2SAdrian Hunter 	/* The summary is not recovered, so it must be disabled for this erase block */
3927f762ab2SAdrian Hunter 	jffs2_sum_disable_collecting(c->summary);
3937f762ab2SAdrian Hunter 
3949bfeb691SDavid Woodhouse 	ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
3959bfeb691SDavid Woodhouse 	if (ret) {
3969bfeb691SDavid Woodhouse 		printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
3979bfeb691SDavid Woodhouse 		kfree(buf);
3989bfeb691SDavid Woodhouse 		return;
3999bfeb691SDavid Woodhouse 	}
4009bfeb691SDavid Woodhouse 
4019fe4854cSDavid Woodhouse 	ofs = write_ofs(c);
4029fe4854cSDavid Woodhouse 
4031da177e4SLinus Torvalds 	if (end-start >= c->wbuf_pagesize) {
4047f716cf3SEstelle Hammache 		/* Need to do another write immediately, but it's possible
4057f716cf3SEstelle Hammache 		   that this is just because the wbuf itself is completely
4067f716cf3SEstelle Hammache 		   full, and there's nothing earlier read back from the
4077f716cf3SEstelle Hammache 		   flash. Hence 'buf' isn't necessarily what we're writing
4087f716cf3SEstelle Hammache 		   from. */
4097f716cf3SEstelle Hammache 		unsigned char *rewrite_buf = buf?:c->wbuf;
4101da177e4SLinus Torvalds 		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
4111da177e4SLinus Torvalds 
4129c261b33SJoe Perches 		jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
4139c261b33SJoe Perches 			  towrite, ofs);
4141da177e4SLinus Torvalds 
4151da177e4SLinus Torvalds #ifdef BREAKMEHEADER
4161da177e4SLinus Torvalds 		static int breakme;
4171da177e4SLinus Torvalds 		if (breakme++ == 20) {
4181da177e4SLinus Torvalds 			printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
4191da177e4SLinus Torvalds 			breakme = 0;
420eda95cbfSArtem Bityutskiy 			mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
4211da177e4SLinus Torvalds 			ret = -EIO;
4221da177e4SLinus Torvalds 		} else
4231da177e4SLinus Torvalds #endif
424eda95cbfSArtem Bityutskiy 			ret = mtd_write(c->mtd, ofs, towrite, &retlen,
4259223a456SThomas Gleixner 					rewrite_buf);
4261da177e4SLinus Torvalds 
427a6bc432eSDavid Woodhouse 		if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
4281da177e4SLinus Torvalds 			/* Argh. We tried. Really we did. */
4291da177e4SLinus Torvalds 			printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
4301da177e4SLinus Torvalds 			kfree(buf);
4311da177e4SLinus Torvalds 
4322f785402SDavid Woodhouse 			if (retlen)
4339bfeb691SDavid Woodhouse 				jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);
4341da177e4SLinus Torvalds 
4351da177e4SLinus Torvalds 			return;
4361da177e4SLinus Torvalds 		}
4371da177e4SLinus Torvalds 		printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);
4381da177e4SLinus Torvalds 
4391da177e4SLinus Torvalds 		c->wbuf_len = (end - start) - towrite;
4401da177e4SLinus Torvalds 		c->wbuf_ofs = ofs + towrite;
4417f716cf3SEstelle Hammache 		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
4421da177e4SLinus Torvalds 		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
4431da177e4SLinus Torvalds 	} else {
4441da177e4SLinus Torvalds 		/* OK, now we're left with the dregs in whichever buffer we're using */
4451da177e4SLinus Torvalds 		if (buf) {
4461da177e4SLinus Torvalds 			memcpy(c->wbuf, buf, end-start);
4471da177e4SLinus Torvalds 		} else {
4481da177e4SLinus Torvalds 			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
4491da177e4SLinus Torvalds 		}
4501da177e4SLinus Torvalds 		c->wbuf_ofs = ofs;
4511da177e4SLinus Torvalds 		c->wbuf_len = end - start;
4521da177e4SLinus Torvalds 	}
4531da177e4SLinus Torvalds 
4541da177e4SLinus Torvalds 	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
4551da177e4SLinus Torvalds 	new_jeb = &c->blocks[ofs / c->sector_size];
4561da177e4SLinus Torvalds 
4571da177e4SLinus Torvalds 	spin_lock(&c->erase_completion_lock);
4589bfeb691SDavid Woodhouse 	for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
4599bfeb691SDavid Woodhouse 		uint32_t rawlen = ref_totlen(c, jeb, raw);
4609bfeb691SDavid Woodhouse 		struct jffs2_inode_cache *ic;
4619bfeb691SDavid Woodhouse 		struct jffs2_raw_node_ref *new_ref;
4629bfeb691SDavid Woodhouse 		struct jffs2_raw_node_ref **adjust_ref = NULL;
4639bfeb691SDavid Woodhouse 		struct jffs2_inode_info *f = NULL;
4641da177e4SLinus Torvalds 
4659c261b33SJoe Perches 		jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
4669c261b33SJoe Perches 			  rawlen, ref_offset(raw), ref_flags(raw), ofs);
4671da177e4SLinus Torvalds 
4689bfeb691SDavid Woodhouse 		ic = jffs2_raw_ref_to_ic(raw);
4699bfeb691SDavid Woodhouse 
4709bfeb691SDavid Woodhouse 		/* Ick. This XATTR mess should be fixed shortly... */
4719bfeb691SDavid Woodhouse 		if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
4729bfeb691SDavid Woodhouse 			struct jffs2_xattr_datum *xd = (void *)ic;
4739bfeb691SDavid Woodhouse 			BUG_ON(xd->node != raw);
4749bfeb691SDavid Woodhouse 			adjust_ref = &xd->node;
4759bfeb691SDavid Woodhouse 			raw->next_in_ino = NULL;
4769bfeb691SDavid Woodhouse 			ic = NULL;
4779bfeb691SDavid Woodhouse 		} else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
4789bfeb691SDavid Woodhouse 			struct jffs2_xattr_ref *xr = (void *)ic;
4799bfeb691SDavid Woodhouse 			BUG_ON(xr->node != raw);
4809bfeb691SDavid Woodhouse 			adjust_ref = &xr->node;
4819bfeb691SDavid Woodhouse 			raw->next_in_ino = NULL;
4829bfeb691SDavid Woodhouse 			ic = NULL;
4839bfeb691SDavid Woodhouse 		} else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
4849bfeb691SDavid Woodhouse 			struct jffs2_raw_node_ref **p = &ic->nodes;
4859bfeb691SDavid Woodhouse 
4869bfeb691SDavid Woodhouse 			/* Remove the old node from the per-inode list */
4879bfeb691SDavid Woodhouse 			while (*p && *p != (void *)ic) {
4889bfeb691SDavid Woodhouse 				if (*p == raw) {
4899bfeb691SDavid Woodhouse 					(*p) = (raw->next_in_ino);
4909bfeb691SDavid Woodhouse 					raw->next_in_ino = NULL;
4919bfeb691SDavid Woodhouse 					break;
4929bfeb691SDavid Woodhouse 				}
4939bfeb691SDavid Woodhouse 				p = &((*p)->next_in_ino);
4949bfeb691SDavid Woodhouse 			}
4959bfeb691SDavid Woodhouse 
4969bfeb691SDavid Woodhouse 			if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
4979bfeb691SDavid Woodhouse 				/* If it's an in-core inode, then we have to adjust any
4989bfeb691SDavid Woodhouse 				   full_dirent or full_dnode structure to point to the
4999bfeb691SDavid Woodhouse 				   new version instead of the old */
50027c72b04SDavid Woodhouse 				f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink);
5019bfeb691SDavid Woodhouse 				if (IS_ERR(f)) {
5029bfeb691SDavid Woodhouse 					/* Should never happen; it _must_ be present */
5039bfeb691SDavid Woodhouse 					JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
5049bfeb691SDavid Woodhouse 						    ic->ino, PTR_ERR(f));
5059bfeb691SDavid Woodhouse 					BUG();
5069bfeb691SDavid Woodhouse 				}
5079bfeb691SDavid Woodhouse 				/* We don't lock f->sem. There's a number of ways we could
5089bfeb691SDavid Woodhouse 				   end up in here with it already being locked, and nobody's
5099bfeb691SDavid Woodhouse 				   going to modify it on us anyway because we hold the
5109bfeb691SDavid Woodhouse 				   alloc_sem. We're only changing one ->raw pointer too,
5119bfeb691SDavid Woodhouse 				   which we can get away with without upsetting readers. */
5129bfeb691SDavid Woodhouse 				adjust_ref = jffs2_incore_replace_raw(c, f, raw,
5139bfeb691SDavid Woodhouse 								      (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
5149bfeb691SDavid Woodhouse 			} else if (unlikely(ic->state != INO_STATE_PRESENT &&
5159bfeb691SDavid Woodhouse 					    ic->state != INO_STATE_CHECKEDABSENT &&
5169bfeb691SDavid Woodhouse 					    ic->state != INO_STATE_GC)) {
5179bfeb691SDavid Woodhouse 				JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
5189bfeb691SDavid Woodhouse 				BUG();
5199bfeb691SDavid Woodhouse 			}
5209bfeb691SDavid Woodhouse 		}
5219bfeb691SDavid Woodhouse 
5229bfeb691SDavid Woodhouse 		new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);
5239bfeb691SDavid Woodhouse 
5249bfeb691SDavid Woodhouse 		if (adjust_ref) {
5259bfeb691SDavid Woodhouse 			BUG_ON(*adjust_ref != raw);
5269bfeb691SDavid Woodhouse 			*adjust_ref = new_ref;
5279bfeb691SDavid Woodhouse 		}
5289bfeb691SDavid Woodhouse 		if (f)
5299bfeb691SDavid Woodhouse 			jffs2_gc_release_inode(c, f);
5309bfeb691SDavid Woodhouse 
5319bfeb691SDavid Woodhouse 		if (!ref_obsolete(raw)) {
5321da177e4SLinus Torvalds 			jeb->dirty_size += rawlen;
5331da177e4SLinus Torvalds 			jeb->used_size  -= rawlen;
5341da177e4SLinus Torvalds 			c->dirty_size += rawlen;
5359bfeb691SDavid Woodhouse 			c->used_size -= rawlen;
5369bfeb691SDavid Woodhouse 			raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
5379bfeb691SDavid Woodhouse 			BUG_ON(raw->next_in_ino);
5381da177e4SLinus Torvalds 		}
5391da177e4SLinus Torvalds 		ofs += rawlen;
5401da177e4SLinus Torvalds 	}
5411da177e4SLinus Torvalds 
5429bfeb691SDavid Woodhouse 	kfree(buf);
5439bfeb691SDavid Woodhouse 
5441da177e4SLinus Torvalds 	/* Fix up the original jeb now it's on the bad_list */
5459bfeb691SDavid Woodhouse 	if (first_raw == jeb->first_node) {
5469c261b33SJoe Perches 		jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
5479c261b33SJoe Perches 			  jeb->offset);
548f116629dSAkinobu Mita 		list_move(&jeb->list, &c->erase_pending_list);
5491da177e4SLinus Torvalds 		c->nr_erasing_blocks++;
550ae3b6ba0SDavid Woodhouse 		jffs2_garbage_collect_trigger(c);
5511da177e4SLinus Torvalds 	}
5521da177e4SLinus Torvalds 
553e0c8e42fSArtem B. Bityutskiy 	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
554e0c8e42fSArtem B. Bityutskiy 	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
5551da177e4SLinus Torvalds 
556e0c8e42fSArtem B. Bityutskiy 	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
557e0c8e42fSArtem B. Bityutskiy 	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
5581da177e4SLinus Torvalds 
5591da177e4SLinus Torvalds 	spin_unlock(&c->erase_completion_lock);
5601da177e4SLinus Torvalds 
5619c261b33SJoe Perches 	jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
5629c261b33SJoe Perches 		  c->wbuf_ofs, c->wbuf_len);
5639bfeb691SDavid Woodhouse 
5641da177e4SLinus Torvalds }
5651da177e4SLinus Torvalds 
5661da177e4SLinus Torvalds /* Meaning of pad argument:
5671da177e4SLinus Torvalds    0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
5681da177e4SLinus Torvalds    1: Pad, do not adjust nextblock free_size
5691da177e4SLinus Torvalds    2: Pad, adjust nextblock free_size
5701da177e4SLinus Torvalds */
5711da177e4SLinus Torvalds #define NOPAD		0
5721da177e4SLinus Torvalds #define PAD_NOACCOUNT	1
5731da177e4SLinus Torvalds #define PAD_ACCOUNTING	2
5741da177e4SLinus Torvalds 
5751da177e4SLinus Torvalds static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
5761da177e4SLinus Torvalds {
5779bfeb691SDavid Woodhouse 	struct jffs2_eraseblock *wbuf_jeb;
5781da177e4SLinus Torvalds 	int ret;
5791da177e4SLinus Torvalds 	size_t retlen;
5801da177e4SLinus Torvalds 
5813be36675SAndrew Victor 	/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
5821da177e4SLinus Torvalds 	   del_timer() the timer we never initialised. */
5833be36675SAndrew Victor 	if (!jffs2_is_writebuffered(c))
5841da177e4SLinus Torvalds 		return 0;
5851da177e4SLinus Torvalds 
58651b11e36SAlexey Khoroshilov 	if (!mutex_is_locked(&c->alloc_sem)) {
5871da177e4SLinus Torvalds 		printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
5881da177e4SLinus Torvalds 		BUG();
5891da177e4SLinus Torvalds 	}
5901da177e4SLinus Torvalds 
5913be36675SAndrew Victor 	if (!c->wbuf_len)	/* already checked c->wbuf above */
5921da177e4SLinus Torvalds 		return 0;
5931da177e4SLinus Torvalds 
5949bfeb691SDavid Woodhouse 	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
5959bfeb691SDavid Woodhouse 	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
5962f785402SDavid Woodhouse 		return -ENOMEM;
5972f785402SDavid Woodhouse 
5981da177e4SLinus Torvalds 	/* Claim the remaining space on the page.
5991da177e4SLinus Torvalds 	   This happens if we have a change to a new block,
6001da177e4SLinus Torvalds 	   or if fsync forces us to flush the writebuffer.
6011da177e4SLinus Torvalds 	   If we have a switch to the next page, we will not have
6021da177e4SLinus Torvalds 	   enough remaining space for this.
6031da177e4SLinus Torvalds 	*/
604daba5cc4SArtem B. Bityutskiy 	if (pad ) {
6051da177e4SLinus Torvalds 		c->wbuf_len = PAD(c->wbuf_len);
6061da177e4SLinus Torvalds 
6071da177e4SLinus Torvalds 		/* Pad with JFFS2_DIRTY_BITMASK initially.  this helps out ECC'd NOR
6081da177e4SLinus Torvalds 		   with 8 byte page size */
6091da177e4SLinus Torvalds 		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
6101da177e4SLinus Torvalds 
6111da177e4SLinus Torvalds 		if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
6121da177e4SLinus Torvalds 			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
6131da177e4SLinus Torvalds 			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
6141da177e4SLinus Torvalds 			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
6151da177e4SLinus Torvalds 			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
6161da177e4SLinus Torvalds 			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
6171da177e4SLinus Torvalds 		}
6181da177e4SLinus Torvalds 	}
6191da177e4SLinus Torvalds 	/* else jffs2_flash_writev has actually filled in the rest of the
6201da177e4SLinus Torvalds 	   buffer for us, and will deal with the node refs etc. later. */
6211da177e4SLinus Torvalds 
6221da177e4SLinus Torvalds #ifdef BREAKME
6231da177e4SLinus Torvalds 	static int breakme;
6241da177e4SLinus Torvalds 	if (breakme++ == 20) {
6251da177e4SLinus Torvalds 		printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
6261da177e4SLinus Torvalds 		breakme = 0;
627eda95cbfSArtem Bityutskiy 		mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
6289223a456SThomas Gleixner 			  brokenbuf);
6291da177e4SLinus Torvalds 		ret = -EIO;
6301da177e4SLinus Torvalds 	} else
6311da177e4SLinus Torvalds #endif
6321da177e4SLinus Torvalds 
633eda95cbfSArtem Bityutskiy 		ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
634eda95cbfSArtem Bityutskiy 				&retlen, c->wbuf);
6351da177e4SLinus Torvalds 
636a6bc432eSDavid Woodhouse 	if (ret) {
6371da177e4SLinus Torvalds 		printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n", ret);
638a6bc432eSDavid Woodhouse 		goto wfail;
639a6bc432eSDavid Woodhouse 	} else if (retlen != c->wbuf_pagesize) {
6401da177e4SLinus Torvalds 		printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
6411da177e4SLinus Torvalds 		       retlen, c->wbuf_pagesize);
6421da177e4SLinus Torvalds 		ret = -EIO;
643a6bc432eSDavid Woodhouse 		goto wfail;
644a6bc432eSDavid Woodhouse 	} else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
645a6bc432eSDavid Woodhouse 	wfail:
6461da177e4SLinus Torvalds 		jffs2_wbuf_recover(c);
6471da177e4SLinus Torvalds 
6481da177e4SLinus Torvalds 		return ret;
6491da177e4SLinus Torvalds 	}
6501da177e4SLinus Torvalds 
6511da177e4SLinus Torvalds 	/* Adjust free size of the block if we padded. */
652daba5cc4SArtem B. Bityutskiy 	if (pad) {
6530bcc099dSDavid Woodhouse 		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
6541da177e4SLinus Torvalds 
6559c261b33SJoe Perches 		jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
6569c261b33SJoe Perches 			  (wbuf_jeb == c->nextblock) ? "next" : "",
6579c261b33SJoe Perches 			  wbuf_jeb->offset);
6581da177e4SLinus Torvalds 
6591da177e4SLinus Torvalds 		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
6601da177e4SLinus Torvalds 		   padded. If there is less free space in the block than that,
6611da177e4SLinus Torvalds 		   something screwed up */
6629bfeb691SDavid Woodhouse 		if (wbuf_jeb->free_size < waste) {
6631da177e4SLinus Torvalds 			printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
6640bcc099dSDavid Woodhouse 			       c->wbuf_ofs, c->wbuf_len, waste);
6651da177e4SLinus Torvalds 			printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
6669bfeb691SDavid Woodhouse 			       wbuf_jeb->offset, wbuf_jeb->free_size);
6671da177e4SLinus Torvalds 			BUG();
6681da177e4SLinus Torvalds 		}
6690bcc099dSDavid Woodhouse 
6700bcc099dSDavid Woodhouse 		spin_lock(&c->erase_completion_lock);
6710bcc099dSDavid Woodhouse 
6729bfeb691SDavid Woodhouse 		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
6730bcc099dSDavid Woodhouse 		/* FIXME: that made it count as dirty. Convert to wasted */
6749bfeb691SDavid Woodhouse 		wbuf_jeb->dirty_size -= waste;
6750bcc099dSDavid Woodhouse 		c->dirty_size -= waste;
6769bfeb691SDavid Woodhouse 		wbuf_jeb->wasted_size += waste;
6770bcc099dSDavid Woodhouse 		c->wasted_size += waste;
6780bcc099dSDavid Woodhouse 	} else
6790bcc099dSDavid Woodhouse 		spin_lock(&c->erase_completion_lock);
6801da177e4SLinus Torvalds 
6811da177e4SLinus Torvalds 	/* Stick any now-obsoleted blocks on the erase_pending_list */
6821da177e4SLinus Torvalds 	jffs2_refile_wbuf_blocks(c);
6831da177e4SLinus Torvalds 	jffs2_clear_wbuf_ino_list(c);
6841da177e4SLinus Torvalds 	spin_unlock(&c->erase_completion_lock);
6851da177e4SLinus Torvalds 
6861da177e4SLinus Torvalds 	memset(c->wbuf,0xff,c->wbuf_pagesize);
6871da177e4SLinus Torvalds 	/* adjust write buffer offset, else we get a non-contiguous write bug */
6881da177e4SLinus Torvalds 	c->wbuf_ofs += c->wbuf_pagesize;
6891da177e4SLinus Torvalds 	c->wbuf_len = 0;
6901da177e4SLinus Torvalds 	return 0;
6911da177e4SLinus Torvalds }
6921da177e4SLinus Torvalds 
6931da177e4SLinus Torvalds /* Trigger garbage collection to flush the write-buffer.
6941da177e4SLinus Torvalds    If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
6951da177e4SLinus Torvalds    outstanding. If ino arg non-zero, do it only if a write for the
6961da177e4SLinus Torvalds    given inode is outstanding. */
6971da177e4SLinus Torvalds int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
6981da177e4SLinus Torvalds {
6991da177e4SLinus Torvalds 	uint32_t old_wbuf_ofs;
7001da177e4SLinus Torvalds 	uint32_t old_wbuf_len;
7011da177e4SLinus Torvalds 	int ret = 0;
7021da177e4SLinus Torvalds 
7039c261b33SJoe Perches 	jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);
7041da177e4SLinus Torvalds 
7058aee6ac1SDavid Woodhouse 	if (!c->wbuf)
7068aee6ac1SDavid Woodhouse 		return 0;
7078aee6ac1SDavid Woodhouse 
708ced22070SDavid Woodhouse 	mutex_lock(&c->alloc_sem);
7091da177e4SLinus Torvalds 	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
7109c261b33SJoe Perches 		jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
711ced22070SDavid Woodhouse 		mutex_unlock(&c->alloc_sem);
7121da177e4SLinus Torvalds 		return 0;
7131da177e4SLinus Torvalds 	}
7141da177e4SLinus Torvalds 
7151da177e4SLinus Torvalds 	old_wbuf_ofs = c->wbuf_ofs;
7161da177e4SLinus Torvalds 	old_wbuf_len = c->wbuf_len;
7171da177e4SLinus Torvalds 
7181da177e4SLinus Torvalds 	if (c->unchecked_size) {
7191da177e4SLinus Torvalds 		/* GC won't make any progress for a while */
7209c261b33SJoe Perches 		jffs2_dbg(1, "%s(): padding. Not finished checking\n",
7219c261b33SJoe Perches 			  __func__);
7221da177e4SLinus Torvalds 		down_write(&c->wbuf_sem);
7231da177e4SLinus Torvalds 		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
7247f716cf3SEstelle Hammache 		/* retry flushing wbuf in case jffs2_wbuf_recover
7257f716cf3SEstelle Hammache 		   left some data in the wbuf */
7267f716cf3SEstelle Hammache 		if (ret)
7277f716cf3SEstelle Hammache 			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
7281da177e4SLinus Torvalds 		up_write(&c->wbuf_sem);
7291da177e4SLinus Torvalds 	} else while (old_wbuf_len &&
7301da177e4SLinus Torvalds 		      old_wbuf_ofs == c->wbuf_ofs) {
7311da177e4SLinus Torvalds 
732ced22070SDavid Woodhouse 		mutex_unlock(&c->alloc_sem);
7331da177e4SLinus Torvalds 
7349c261b33SJoe Perches 		jffs2_dbg(1, "%s(): calls gc pass\n", __func__);
7351da177e4SLinus Torvalds 
7361da177e4SLinus Torvalds 		ret = jffs2_garbage_collect_pass(c);
7371da177e4SLinus Torvalds 		if (ret) {
7381da177e4SLinus Torvalds 			/* GC failed. Flush it with padding instead */
739ced22070SDavid Woodhouse 			mutex_lock(&c->alloc_sem);
7401da177e4SLinus Torvalds 			down_write(&c->wbuf_sem);
7411da177e4SLinus Torvalds 			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
7427f716cf3SEstelle Hammache 			/* retry flushing wbuf in case jffs2_wbuf_recover
7437f716cf3SEstelle Hammache 			   left some data in the wbuf */
7447f716cf3SEstelle Hammache 			if (ret)
7457f716cf3SEstelle Hammache 				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
7461da177e4SLinus Torvalds 			up_write(&c->wbuf_sem);
7471da177e4SLinus Torvalds 			break;
7481da177e4SLinus Torvalds 		}
749ced22070SDavid Woodhouse 		mutex_lock(&c->alloc_sem);
7501da177e4SLinus Torvalds 	}
7511da177e4SLinus Torvalds 
7529c261b33SJoe Perches 	jffs2_dbg(1, "%s(): ends...\n", __func__);
7531da177e4SLinus Torvalds 
754ced22070SDavid Woodhouse 	mutex_unlock(&c->alloc_sem);
7551da177e4SLinus Torvalds 	return ret;
7561da177e4SLinus Torvalds }
7571da177e4SLinus Torvalds 
7581da177e4SLinus Torvalds /* Pad write-buffer to end and write it, wasting space. */
7591da177e4SLinus Torvalds int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
7601da177e4SLinus Torvalds {
7611da177e4SLinus Torvalds 	int ret;
7621da177e4SLinus Torvalds 
7638aee6ac1SDavid Woodhouse 	if (!c->wbuf)
7648aee6ac1SDavid Woodhouse 		return 0;
7658aee6ac1SDavid Woodhouse 
7661da177e4SLinus Torvalds 	down_write(&c->wbuf_sem);
7671da177e4SLinus Torvalds 	ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
7687f716cf3SEstelle Hammache 	/* retry - maybe wbuf recover left some data in wbuf. */
7697f716cf3SEstelle Hammache 	if (ret)
7707f716cf3SEstelle Hammache 		ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
7711da177e4SLinus Torvalds 	up_write(&c->wbuf_sem);
7721da177e4SLinus Torvalds 
7731da177e4SLinus Torvalds 	return ret;
7741da177e4SLinus Torvalds }
7751da177e4SLinus Torvalds 
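/* Copy as much of 'buf' as fits into the write buffer and return the number
   of bytes consumed.  Returns 0 when the wbuf is empty and the caller has at
   least a full page of data, so the caller can write to flash directly and
   bypass the wbuf. */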
776dcb09328SThomas Gleixner static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
777dcb09328SThomas Gleixner 			      size_t len)
778dcb09328SThomas Gleixner {
779dcb09328SThomas Gleixner 	if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
780dcb09328SThomas Gleixner 		return 0;
781dcb09328SThomas Gleixner 
782dcb09328SThomas Gleixner 	if (len > (c->wbuf_pagesize - c->wbuf_len))
783dcb09328SThomas Gleixner 		len = c->wbuf_pagesize - c->wbuf_len;
784dcb09328SThomas Gleixner 	memcpy(c->wbuf + c->wbuf_len, buf, len);
785dcb09328SThomas Gleixner 	c->wbuf_len += (uint32_t) len;
786dcb09328SThomas Gleixner 	return len;
787dcb09328SThomas Gleixner }
788dcb09328SThomas Gleixner 
789dcb09328SThomas Gleixner int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
790dcb09328SThomas Gleixner 		       unsigned long count, loff_t to, size_t *retlen,
791dcb09328SThomas Gleixner 		       uint32_t ino)
792dcb09328SThomas Gleixner {
793dcb09328SThomas Gleixner 	struct jffs2_eraseblock *jeb;
794dcb09328SThomas Gleixner 	size_t wbuf_retlen, donelen = 0;
795dcb09328SThomas Gleixner 	uint32_t outvec_to = to;
796dcb09328SThomas Gleixner 	int ret, invec;
797dcb09328SThomas Gleixner 
798dcb09328SThomas Gleixner 	/* If not writebuffered flash, don't bother */
7993be36675SAndrew Victor 	if (!jffs2_is_writebuffered(c))
8001da177e4SLinus Torvalds 		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
8011da177e4SLinus Torvalds 
8021da177e4SLinus Torvalds 	down_write(&c->wbuf_sem);
8031da177e4SLinus Torvalds 
8041da177e4SLinus Torvalds 	/* If wbuf_ofs is not initialized, set it to target address */
8051da177e4SLinus Torvalds 	if (c->wbuf_ofs == 0xFFFFFFFF) {
8061da177e4SLinus Torvalds 		c->wbuf_ofs = PAGE_DIV(to);
8071da177e4SLinus Torvalds 		c->wbuf_len = PAGE_MOD(to);
8081da177e4SLinus Torvalds 		memset(c->wbuf,0xff,c->wbuf_pagesize);
8091da177e4SLinus Torvalds 	}
8101da177e4SLinus Torvalds 
811dcb09328SThomas Gleixner 	/*
812dcb09328SThomas Gleixner 	 * Sanity checks on target address.  It's permitted to write
813dcb09328SThomas Gleixner 	 * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
814dcb09328SThomas Gleixner 	 * write at the beginning of a new erase block. Anything else,
815dcb09328SThomas Gleixner 	 * and you die.  New block starts at xxx000c (0-b = block
816dcb09328SThomas Gleixner 	 * header)
8171da177e4SLinus Torvalds 	 */
8183be36675SAndrew Victor 	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
8191da177e4SLinus Torvalds 		/* It's a write to a new block */
8201da177e4SLinus Torvalds 		if (c->wbuf_len) {
8219c261b33SJoe Perches 			jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
8229c261b33SJoe Perches 				  __func__, (unsigned long)to, c->wbuf_ofs);
8231da177e4SLinus Torvalds 			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
824dcb09328SThomas Gleixner 			if (ret)
825dcb09328SThomas Gleixner 				goto outerr;
8261da177e4SLinus Torvalds 		}
8271da177e4SLinus Torvalds 		/* set pointer to new block */
8281da177e4SLinus Torvalds 		c->wbuf_ofs = PAGE_DIV(to);
8291da177e4SLinus Torvalds 		c->wbuf_len = PAGE_MOD(to);
8301da177e4SLinus Torvalds 	}
8311da177e4SLinus Torvalds 
8321da177e4SLinus Torvalds 	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
8331da177e4SLinus Torvalds 		/* We're not writing immediately after the writebuffer. Bad. */
8349c261b33SJoe Perches 		printk(KERN_CRIT "%s(): Non-contiguous write to %08lx\n",
8359c261b33SJoe Perches 		       __func__, (unsigned long)to);
8361da177e4SLinus Torvalds 		if (c->wbuf_len)
8371da177e4SLinus Torvalds 			printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
8381da177e4SLinus Torvalds 			       c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
8391da177e4SLinus Torvalds 		BUG();
8401da177e4SLinus Torvalds 	}
8411da177e4SLinus Torvalds 
8421da177e4SLinus Torvalds 	/* adjust alignment offset */
8431da177e4SLinus Torvalds 	if (c->wbuf_len != PAGE_MOD(to)) {
8441da177e4SLinus Torvalds 		c->wbuf_len = PAGE_MOD(to);
8451da177e4SLinus Torvalds 		/* take care of alignment to next page */
846dcb09328SThomas Gleixner 		if (!c->wbuf_len) {
8471da177e4SLinus Torvalds 			c->wbuf_len = c->wbuf_pagesize;
8481da177e4SLinus Torvalds 			ret = __jffs2_flush_wbuf(c, NOPAD);
849dcb09328SThomas Gleixner 			if (ret)
850dcb09328SThomas Gleixner 				goto outerr;
8511da177e4SLinus Torvalds 		}
8521da177e4SLinus Torvalds 	}
8531da177e4SLinus Torvalds 
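	/* For each iovec: top up the wbuf first, write any whole pages
	   straight to flash, then stash the remainder back in the wbuf. */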
854dcb09328SThomas Gleixner 	for (invec = 0; invec < count; invec++) {
855dcb09328SThomas Gleixner 		int vlen = invecs[invec].iov_len;
856dcb09328SThomas Gleixner 		uint8_t *v = invecs[invec].iov_base;
8571da177e4SLinus Torvalds 
858dcb09328SThomas Gleixner 		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
8591da177e4SLinus Torvalds 
860dcb09328SThomas Gleixner 		if (c->wbuf_len == c->wbuf_pagesize) {
861dcb09328SThomas Gleixner 			ret = __jffs2_flush_wbuf(c, NOPAD);
862dcb09328SThomas Gleixner 			if (ret)
863dcb09328SThomas Gleixner 				goto outerr;
8641da177e4SLinus Torvalds 		}
865dcb09328SThomas Gleixner 		vlen -= wbuf_retlen;
866dcb09328SThomas Gleixner 		outvec_to += wbuf_retlen;
8671da177e4SLinus Torvalds 		donelen += wbuf_retlen;
868dcb09328SThomas Gleixner 		v += wbuf_retlen;
8691da177e4SLinus Torvalds 
870dcb09328SThomas Gleixner 		if (vlen >= c->wbuf_pagesize) {
871eda95cbfSArtem Bityutskiy 			ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen),
872dcb09328SThomas Gleixner 					&wbuf_retlen, v);
873dcb09328SThomas Gleixner 			if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
874dcb09328SThomas Gleixner 				goto outfile;
875dcb09328SThomas Gleixner 
876dcb09328SThomas Gleixner 			vlen -= wbuf_retlen;
877dcb09328SThomas Gleixner 			outvec_to += wbuf_retlen;
878dcb09328SThomas Gleixner 			c->wbuf_ofs = outvec_to;
879dcb09328SThomas Gleixner 			donelen += wbuf_retlen;
880dcb09328SThomas Gleixner 			v += wbuf_retlen;
8811da177e4SLinus Torvalds 		}
8821da177e4SLinus Torvalds 
883dcb09328SThomas Gleixner 		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
884dcb09328SThomas Gleixner 		if (c->wbuf_len == c->wbuf_pagesize) {
885dcb09328SThomas Gleixner 			ret = __jffs2_flush_wbuf(c, NOPAD);
886dcb09328SThomas Gleixner 			if (ret)
887dcb09328SThomas Gleixner 				goto outerr;
8881da177e4SLinus Torvalds 		}
8891da177e4SLinus Torvalds 
890dcb09328SThomas Gleixner 		outvec_to += wbuf_retlen;
891dcb09328SThomas Gleixner 		donelen += wbuf_retlen;
8921da177e4SLinus Torvalds 	}
8931da177e4SLinus Torvalds 
894dcb09328SThomas Gleixner 	/*
895dcb09328SThomas Gleixner 	 * If there's a remainder in the wbuf and it's a non-GC write,
896dcb09328SThomas Gleixner 	 * remember that the wbuf affects this ino
897dcb09328SThomas Gleixner 	 */
8981da177e4SLinus Torvalds 	*retlen = donelen;
8991da177e4SLinus Torvalds 
900e631ddbaSFerenc Havasi 	if (jffs2_sum_active()) {
901e631ddbaSFerenc Havasi 		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
902e631ddbaSFerenc Havasi 		if (res)
903e631ddbaSFerenc Havasi 			return res;
904e631ddbaSFerenc Havasi 	}
905e631ddbaSFerenc Havasi 
9061da177e4SLinus Torvalds 	if (c->wbuf_len && ino)
9071da177e4SLinus Torvalds 		jffs2_wbuf_dirties_inode(c, ino);
9081da177e4SLinus Torvalds 
9091da177e4SLinus Torvalds 	ret = 0;
910dcb09328SThomas Gleixner 	up_write(&c->wbuf_sem);
911dcb09328SThomas Gleixner 	return ret;
9121da177e4SLinus Torvalds 
913dcb09328SThomas Gleixner outfile:
914dcb09328SThomas Gleixner 	/*
915dcb09328SThomas Gleixner 	 * At this point we have no problem, c->wbuf is empty. However
916dcb09328SThomas Gleixner 	 * refile nextblock to avoid writing again to same address.
917dcb09328SThomas Gleixner 	 */
918dcb09328SThomas Gleixner 
919dcb09328SThomas Gleixner 	spin_lock(&c->erase_completion_lock);
920dcb09328SThomas Gleixner 
921dcb09328SThomas Gleixner 	jeb = &c->blocks[outvec_to / c->sector_size];
922dcb09328SThomas Gleixner 	jffs2_block_refile(c, jeb, REFILE_ANYWAY);
923dcb09328SThomas Gleixner 
924dcb09328SThomas Gleixner 	spin_unlock(&c->erase_completion_lock);
925dcb09328SThomas Gleixner 
926dcb09328SThomas Gleixner outerr:
927dcb09328SThomas Gleixner 	*retlen = 0;
9281da177e4SLinus Torvalds 	up_write(&c->wbuf_sem);
9291da177e4SLinus Torvalds 	return ret;
9301da177e4SLinus Torvalds }
9311da177e4SLinus Torvalds 
9321da177e4SLinus Torvalds /*
9331da177e4SLinus Torvalds  *	This is the entry for flash write.
9341da177e4SLinus Torvalds  *	Check if we work on NAND flash; if so, build a kvec and write it via writev
9351da177e4SLinus Torvalds */
9369bfeb691SDavid Woodhouse int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
9379bfeb691SDavid Woodhouse 		      size_t *retlen, const u_char *buf)
9381da177e4SLinus Torvalds {
9391da177e4SLinus Torvalds 	struct kvec vecs[1];
9401da177e4SLinus Torvalds 
9413be36675SAndrew Victor 	if (!jffs2_is_writebuffered(c))
942e631ddbaSFerenc Havasi 		return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
9431da177e4SLinus Torvalds 
9441da177e4SLinus Torvalds 	vecs[0].iov_base = (unsigned char *) buf;
9451da177e4SLinus Torvalds 	vecs[0].iov_len = len;
9461da177e4SLinus Torvalds 	return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
9471da177e4SLinus Torvalds }
9481da177e4SLinus Torvalds 
9491da177e4SLinus Torvalds /*
9501da177e4SLinus Torvalds 	Handle readback from writebuffer and ECC failure return
9511da177e4SLinus Torvalds */
9521da177e4SLinus Torvalds int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
9531da177e4SLinus Torvalds {
9541da177e4SLinus Torvalds 	loff_t	orbf = 0, owbf = 0, lwbf = 0;
9551da177e4SLinus Torvalds 	int	ret;
9561da177e4SLinus Torvalds 
9573be36675SAndrew Victor 	if (!jffs2_is_writebuffered(c))
958329ad399SArtem Bityutskiy 		return mtd_read(c->mtd, ofs, len, retlen, buf);
9591da177e4SLinus Torvalds 
9603be36675SAndrew Victor 	/* Read flash */
961894214d1SArtem B. Bityuckiy 	down_read(&c->wbuf_sem);
962329ad399SArtem Bityutskiy 	ret = mtd_read(c->mtd, ofs, len, retlen, buf);
9631da177e4SLinus Torvalds 
9649a1fcdfdSThomas Gleixner 	if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
9659a1fcdfdSThomas Gleixner 		if (ret == -EBADMSG)
9669a1fcdfdSThomas Gleixner 			printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx)"
9679a1fcdfdSThomas Gleixner 			       " returned ECC error\n", len, ofs);
9681da177e4SLinus Torvalds 		/*
9699a1fcdfdSThomas Gleixner 		 * We have the raw data without ECC correction in the buffer,
9709a1fcdfdSThomas Gleixner 		 * maybe we are lucky and all data or parts are correct. We
9719a1fcdfdSThomas Gleixner 		 * check the node.  If the data are corrupted, the node check
9729a1fcdfdSThomas Gleixner 		 * will sort it out.  We keep this block; it will fail on write
9739a1fcdfdSThomas Gleixner 		 * or erase and then we mark it bad. Or should we do that now?
9749a1fcdfdSThomas Gleixner 		 * But we should give it a chance.  Maybe we had a system crash
9759a1fcdfdSThomas Gleixner 		 * or power loss before the ECC write or an erase was completed.
9761da177e4SLinus Torvalds 		 * So we return success. :)
9771da177e4SLinus Torvalds 		 */
9781da177e4SLinus Torvalds 		ret = 0;
9791da177e4SLinus Torvalds 	}
9801da177e4SLinus Torvalds 
9811da177e4SLinus Torvalds 	/* if no writebuffer available or write buffer empty, return */
9821da177e4SLinus Torvalds 	if (!c->wbuf_pagesize || !c->wbuf_len)
983894214d1SArtem B. Bityuckiy 		goto exit;
9841da177e4SLinus Torvalds 
9851da177e4SLinus Torvalds 	/* if we read in a different block, return */
9863be36675SAndrew Victor 	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
987894214d1SArtem B. Bityuckiy 		goto exit;
9881da177e4SLinus Torvalds 
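	/* Part of the requested range may still exist only in the write buffer;
	   overlay whatever the wbuf covers on top of the data read from flash. */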
9891da177e4SLinus Torvalds 	if (ofs >= c->wbuf_ofs) {
9901da177e4SLinus Torvalds 		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
9911da177e4SLinus Torvalds 		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
9921da177e4SLinus Torvalds 			goto exit;
9931da177e4SLinus Torvalds 		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
9941da177e4SLinus Torvalds 		if (lwbf > len)
9951da177e4SLinus Torvalds 			lwbf = len;
9961da177e4SLinus Torvalds 	} else {
9971da177e4SLinus Torvalds 		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
9981da177e4SLinus Torvalds 		if (orbf > len)			/* is write beyond write buffer ? */
9991da177e4SLinus Torvalds 			goto exit;
10001da177e4SLinus Torvalds 		lwbf = len - orbf;		/* number of bytes to copy */
10011da177e4SLinus Torvalds 		if (lwbf > c->wbuf_len)
10021da177e4SLinus Torvalds 			lwbf = c->wbuf_len;
10031da177e4SLinus Torvalds 	}
10041da177e4SLinus Torvalds 	if (lwbf > 0)
10051da177e4SLinus Torvalds 		memcpy(buf+orbf,c->wbuf+owbf,lwbf);
10061da177e4SLinus Torvalds 
10071da177e4SLinus Torvalds exit:
10081da177e4SLinus Torvalds 	up_read(&c->wbuf_sem);
10091da177e4SLinus Torvalds 	return ret;
10101da177e4SLinus Torvalds }
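/*
 * A minimal stand-alone sketch of the overlap copy done at the end of
 * jffs2_flash_read() above, with hypothetical names and values and no MTD
 * context: bytes still pending in the write buffer override what was just
 * read back from flash.
 */
#include <stdio.h>
#include <string.h>

static void merge_wbuf(unsigned char *buf, unsigned long ofs, unsigned long len,
		       const unsigned char *wbuf, unsigned long wbuf_ofs,
		       unsigned long wbuf_len)
{
	unsigned long orbf = 0, owbf = 0, lwbf = 0;

	if (ofs >= wbuf_ofs) {
		owbf = ofs - wbuf_ofs;		/* offset into the write buffer */
		if (owbf > wbuf_len)		/* read starts past the buffered data */
			return;
		lwbf = wbuf_len - owbf;		/* bytes available to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = wbuf_ofs - ofs;		/* offset into the read buffer */
		if (orbf > len)			/* buffered data starts past this read */
			return;
		lwbf = len - orbf;
		if (lwbf > wbuf_len)
			lwbf = wbuf_len;
	}
	if (lwbf > 0)
		memcpy(buf + orbf, wbuf + owbf, lwbf);
}

int main(void)
{
	unsigned char flash[16];			/* pretend mtd_read() filled this */
	unsigned char wbuf[4] = { 0xAA, 0xBB, 0xCC, 0xDD };

	memset(flash, 0xFF, sizeof(flash));
	/* the read covers 0x100..0x10f; the wbuf holds newer data for 0x108..0x10b */
	merge_wbuf(flash, 0x100, sizeof(flash), wbuf, 0x108, sizeof(wbuf));
	printf("byte 8 = %02x\n", flash[8]);		/* prints "aa" */
	return 0;
}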
10111da177e4SLinus Torvalds 
10128593fbc6SThomas Gleixner #define NR_OOB_SCAN_PAGES 4
10138593fbc6SThomas Gleixner 
101409b3fba5SDavid Woodhouse /* For historical reasons we use only 8 bytes for OOB clean marker */
101509b3fba5SDavid Woodhouse #define OOB_CM_SIZE 8
1016a7a6ace1SArtem Bityutskiy 
1017a7a6ace1SArtem Bityutskiy static const struct jffs2_unknown_node oob_cleanmarker =
1018a7a6ace1SArtem Bityutskiy {
1019566865a2SDavid Woodhouse 	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
1020566865a2SDavid Woodhouse 	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
1021566865a2SDavid Woodhouse 	.totlen = constant_cpu_to_je32(8)
1022a7a6ace1SArtem Bityutskiy };
1023a7a6ace1SArtem Bityutskiy 
10241da177e4SLinus Torvalds /*
1025a7a6ace1SArtem Bityutskiy  * Check whether the out-of-band area is empty. This function knows about the
1026a7a6ace1SArtem Bityutskiy  * cleanmarker; if it is present in OOB, the OOB is still treated as empty.
10271da177e4SLinus Torvalds  */
10288593fbc6SThomas Gleixner int jffs2_check_oob_empty(struct jffs2_sb_info *c,
10298593fbc6SThomas Gleixner 			  struct jffs2_eraseblock *jeb, int mode)
10301da177e4SLinus Torvalds {
1031a7a6ace1SArtem Bityutskiy 	int i, ret;
1032a7a6ace1SArtem Bityutskiy 	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
10338593fbc6SThomas Gleixner 	struct mtd_oob_ops ops;
10341da177e4SLinus Torvalds 
10350612b9ddSBrian Norris 	ops.mode = MTD_OPS_AUTO_OOB;
1036a7a6ace1SArtem Bityutskiy 	ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
10378593fbc6SThomas Gleixner 	ops.oobbuf = c->oobbuf;
1038a7a6ace1SArtem Bityutskiy 	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
10398593fbc6SThomas Gleixner 	ops.datbuf = NULL;
10408593fbc6SThomas Gleixner 
1041fd2819bbSArtem Bityutskiy 	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
1042a7a6ace1SArtem Bityutskiy 	if (ret || ops.oobretlen != ops.ooblen) {
10437be26bfbSAndrew Morton 		printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
10447be26bfbSAndrew Morton 				" bytes, read %zd bytes, error %d\n",
10457be26bfbSAndrew Morton 				jeb->offset, ops.ooblen, ops.oobretlen, ret);
1046a7a6ace1SArtem Bityutskiy 		if (!ret)
1047a7a6ace1SArtem Bityutskiy 			ret = -EIO;
10488593fbc6SThomas Gleixner 		return ret;
10491da177e4SLinus Torvalds 	}
10501da177e4SLinus Torvalds 
1051a7a6ace1SArtem Bityutskiy 	for(i = 0; i < ops.ooblen; i++) {
1052a7a6ace1SArtem Bityutskiy 		if (mode && i < cmlen)
1053a7a6ace1SArtem Bityutskiy 			/* Yeah, we know about the cleanmarker */
10541da177e4SLinus Torvalds 			continue;
10551da177e4SLinus Torvalds 
10568593fbc6SThomas Gleixner 		if (ops.oobbuf[i] != 0xFF) {
10579c261b33SJoe Perches 			jffs2_dbg(2, "Found %02x at %x in OOB for "
10589c261b33SJoe Perches 				  "%08x\n", ops.oobbuf[i], i, jeb->offset);
10598593fbc6SThomas Gleixner 			return 1;
10601da177e4SLinus Torvalds 		}
10611da177e4SLinus Torvalds 	}
10621da177e4SLinus Torvalds 
10638593fbc6SThomas Gleixner 	return 0;
10641da177e4SLinus Torvalds }
10651da177e4SLinus Torvalds 
10661da177e4SLinus Torvalds /*
1067a7a6ace1SArtem Bityutskiy  * Check for a valid cleanmarker.
1068a7a6ace1SArtem Bityutskiy  * Returns: 0 if a valid cleanmarker was found
1069a7a6ace1SArtem Bityutskiy  *	    1 if no cleanmarker was found
1070a7a6ace1SArtem Bityutskiy  *	    negative error code if an error occurred
10711da177e4SLinus Torvalds  */
10728593fbc6SThomas Gleixner int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
10738593fbc6SThomas Gleixner 				 struct jffs2_eraseblock *jeb)
10741da177e4SLinus Torvalds {
10758593fbc6SThomas Gleixner 	struct mtd_oob_ops ops;
1076a7a6ace1SArtem Bityutskiy 	int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
10771da177e4SLinus Torvalds 
10780612b9ddSBrian Norris 	ops.mode = MTD_OPS_AUTO_OOB;
1079a7a6ace1SArtem Bityutskiy 	ops.ooblen = cmlen;
10808593fbc6SThomas Gleixner 	ops.oobbuf = c->oobbuf;
1081a7a6ace1SArtem Bityutskiy 	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
10828593fbc6SThomas Gleixner 	ops.datbuf = NULL;
10838593fbc6SThomas Gleixner 
1084fd2819bbSArtem Bityutskiy 	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
1085a7a6ace1SArtem Bityutskiy 	if (ret || ops.oobretlen != ops.ooblen) {
10867be26bfbSAndrew Morton 		printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
10877be26bfbSAndrew Morton 				" bytes, read %zd bytes, error %d\n",
10887be26bfbSAndrew Morton 				jeb->offset, ops.ooblen, ops.oobretlen, ret);
1089a7a6ace1SArtem Bityutskiy 		if (!ret)
1090a7a6ace1SArtem Bityutskiy 			ret = -EIO;
10911da177e4SLinus Torvalds 		return ret;
10921da177e4SLinus Torvalds 	}
10938593fbc6SThomas Gleixner 
1094a7a6ace1SArtem Bityutskiy 	return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
10951da177e4SLinus Torvalds }
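/*
 * A minimal sketch of consuming the 0 / 1 / negative return convention
 * documented above; the helper and its caller are hypothetical, not part of
 * this file, but they use only functions and types defined here.
 */
static int example_scan_cleanmarker(struct jffs2_sb_info *c,
				    struct jffs2_eraseblock *jeb)
{
	int ret = jffs2_check_nand_cleanmarker(c, jeb);

	if (ret < 0)
		return ret;	/* OOB read failed, propagate the error */
	if (ret == 0)
		return 0;	/* valid cleanmarker: block was erased cleanly */
	return 1;		/* no cleanmarker: treat the block as dirty */
}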
10961da177e4SLinus Torvalds 
10978593fbc6SThomas Gleixner int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
10988593fbc6SThomas Gleixner 				 struct jffs2_eraseblock *jeb)
10991da177e4SLinus Torvalds {
11001da177e4SLinus Torvalds 	int ret;
11018593fbc6SThomas Gleixner 	struct mtd_oob_ops ops;
1102a7a6ace1SArtem Bityutskiy 	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
11031da177e4SLinus Torvalds 
11040612b9ddSBrian Norris 	ops.mode = MTD_OPS_AUTO_OOB;
1105a7a6ace1SArtem Bityutskiy 	ops.ooblen = cmlen;
1106a7a6ace1SArtem Bityutskiy 	ops.oobbuf = (uint8_t *)&oob_cleanmarker;
1107a7a6ace1SArtem Bityutskiy 	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
11088593fbc6SThomas Gleixner 	ops.datbuf = NULL;
11098593fbc6SThomas Gleixner 
1110a2cc5ba0SArtem Bityutskiy 	ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
1111a7a6ace1SArtem Bityutskiy 	if (ret || ops.oobretlen != ops.ooblen) {
11127be26bfbSAndrew Morton 		printk(KERN_ERR "cannot write OOB for EB at %08x, requested %zd"
11137be26bfbSAndrew Morton 				" bytes, wrote %zd bytes, error %d\n",
11147be26bfbSAndrew Morton 				jeb->offset, ops.ooblen, ops.oobretlen, ret);
1115a7a6ace1SArtem Bityutskiy 		if (!ret)
1116a7a6ace1SArtem Bityutskiy 			ret = -EIO;
11171da177e4SLinus Torvalds 		return ret;
11181da177e4SLinus Torvalds 	}
1119a7a6ace1SArtem Bityutskiy 
11201da177e4SLinus Torvalds 	return 0;
11211da177e4SLinus Torvalds }
11221da177e4SLinus Torvalds 
11231da177e4SLinus Torvalds /*
11241da177e4SLinus Torvalds  * On NAND we try to mark this block bad. If the block has failed to erase
112525985edcSLucas De Marchi  * more than MAX_ERASE_FAILURES times, we finally mark it bad.
11261da177e4SLinus Torvalds  * Don't care about failures. This block remains on the erase-pending
11271da177e4SLinus Torvalds  * or badblock list as long as nobody manipulates the flash with
11281da177e4SLinus Torvalds  * a bootloader or something like that.
11291da177e4SLinus Torvalds  */
11301da177e4SLinus Torvalds 
11311da177e4SLinus Torvalds int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
11321da177e4SLinus Torvalds {
11331da177e4SLinus Torvalds 	int 	ret;
11341da177e4SLinus Torvalds 
11351da177e4SLinus Torvalds 	/* if the failure count is still below the maximum, give the block another chance */
11361da177e4SLinus Torvalds 	if( ++jeb->bad_count < MAX_ERASE_FAILURES)
11371da177e4SLinus Torvalds 		return 0;
11381da177e4SLinus Torvalds 
11390feba829SArtem Bityutskiy 	printk(KERN_WARNING "JFFS2: marking eraseblock at %08x as bad\n", bad_offset);
11405942ddbcSArtem Bityutskiy 	ret = mtd_block_markbad(c->mtd, bad_offset);
11411da177e4SLinus Torvalds 
11421da177e4SLinus Torvalds 	if (ret) {
11439c261b33SJoe Perches 		jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
11449c261b33SJoe Perches 			  __func__, jeb->offset, ret);
11451da177e4SLinus Torvalds 		return ret;
11461da177e4SLinus Torvalds 	}
11471da177e4SLinus Torvalds 	return 1;
11481da177e4SLinus Torvalds }
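/*
 * Likewise, a small hypothetical sketch of how an erase-failure path might
 * interpret the return values of jffs2_write_nand_badblock() above.
 */
static void example_erase_failed(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb,
				 uint32_t bad_offset)
{
	int ret = jffs2_write_nand_badblock(c, jeb, bad_offset);

	if (ret > 0) {
		/* the block has now been marked bad: stop using it */
	} else if (ret == 0) {
		/* still below MAX_ERASE_FAILURES: leave it erase-pending */
	} else {
		/* marking it bad failed: it simply stays on the bad list */
	}
}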
11491da177e4SLinus Torvalds 
1150a7a6ace1SArtem Bityutskiy int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
11511da177e4SLinus Torvalds {
11525bd34c09SThomas Gleixner 	struct nand_ecclayout *oinfo = c->mtd->ecclayout;
11531da177e4SLinus Torvalds 
11541da177e4SLinus Torvalds 	if (!c->mtd->oobsize)
11551da177e4SLinus Torvalds 		return 0;
11561da177e4SLinus Torvalds 
11571da177e4SLinus Torvalds 	/* Cleanmarker is out-of-band, so inline size zero */
11581da177e4SLinus Torvalds 	c->cleanmarker_size = 0;
11591da177e4SLinus Torvalds 
1160a7a6ace1SArtem Bityutskiy 	if (!oinfo || oinfo->oobavail == 0) {
1161a7a6ace1SArtem Bityutskiy 		printk(KERN_ERR "inconsistent device description\n");
11621da177e4SLinus Torvalds 		return -EINVAL;
11631da177e4SLinus Torvalds 	}
11645bd34c09SThomas Gleixner 
11659c261b33SJoe Perches 	jffs2_dbg(1, "JFFS2 using OOB on NAND\n");
11665bd34c09SThomas Gleixner 
1167a7a6ace1SArtem Bityutskiy 	c->oobavail = oinfo->oobavail;
11681da177e4SLinus Torvalds 
11691da177e4SLinus Torvalds 	/* Initialise write buffer */
11701da177e4SLinus Torvalds 	init_rwsem(&c->wbuf_sem);
117128318776SJoern Engel 	c->wbuf_pagesize = c->mtd->writesize;
11721da177e4SLinus Torvalds 	c->wbuf_ofs = 0xFFFFFFFF;
11731da177e4SLinus Torvalds 
11741da177e4SLinus Torvalds 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
11751da177e4SLinus Torvalds 	if (!c->wbuf)
11761da177e4SLinus Torvalds 		return -ENOMEM;
11771da177e4SLinus Torvalds 
1178a7a6ace1SArtem Bityutskiy 	c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->oobavail, GFP_KERNEL);
1179a7a6ace1SArtem Bityutskiy 	if (!c->oobbuf) {
11801da177e4SLinus Torvalds 		kfree(c->wbuf);
11811da177e4SLinus Torvalds 		return -ENOMEM;
11821da177e4SLinus Torvalds 	}
1183a7a6ace1SArtem Bityutskiy 
1184a6bc432eSDavid Woodhouse #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1185a6bc432eSDavid Woodhouse 	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1186a6bc432eSDavid Woodhouse 	if (!c->wbuf_verify) {
1187a6bc432eSDavid Woodhouse 		kfree(c->oobbuf);
1188a6bc432eSDavid Woodhouse 		kfree(c->wbuf);
1189a6bc432eSDavid Woodhouse 		return -ENOMEM;
1190a6bc432eSDavid Woodhouse 	}
1191a6bc432eSDavid Woodhouse #endif
1192a7a6ace1SArtem Bityutskiy 	return 0;
11931da177e4SLinus Torvalds }
11941da177e4SLinus Torvalds 
11951da177e4SLinus Torvalds void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
11961da177e4SLinus Torvalds {
1197a6bc432eSDavid Woodhouse #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1198a6bc432eSDavid Woodhouse 	kfree(c->wbuf_verify);
1199a6bc432eSDavid Woodhouse #endif
12001da177e4SLinus Torvalds 	kfree(c->wbuf);
12018593fbc6SThomas Gleixner 	kfree(c->oobbuf);
12021da177e4SLinus Torvalds }
12031da177e4SLinus Torvalds 
12048f15fd55SAndrew Victor int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
12058f15fd55SAndrew Victor 	c->cleanmarker_size = 0;		/* No cleanmarkers needed */
12068f15fd55SAndrew Victor 
12078f15fd55SAndrew Victor 	/* Initialize write buffer */
12088f15fd55SAndrew Victor 	init_rwsem(&c->wbuf_sem);
12098f15fd55SAndrew Victor 
1210daba5cc4SArtem B. Bityutskiy 
1211daba5cc4SArtem B. Bityutskiy 	c->wbuf_pagesize =  c->mtd->erasesize;
1212daba5cc4SArtem B. Bityutskiy 
1213daba5cc4SArtem B. Bityutskiy 	/* Find a suitable c->sector_size
1214daba5cc4SArtem B. Bityutskiy 	 * - not too many sectors
1215daba5cc4SArtem B. Bityutskiy 	 * - sectors have to be at least 4 KiB plus some bytes
1216daba5cc4SArtem B. Bityutskiy 	 * - all known DataFlashes have erase sizes of 528 or 1056 bytes
1217daba5cc4SArtem B. Bityutskiy 	 * - we take at least 8 eraseblocks and want at least 8 KiB per sector
1218daba5cc4SArtem B. Bityutskiy 	 * - the concatenation should be a power of 2
1219daba5cc4SArtem B. Bityutskiy 	 */
1220daba5cc4SArtem B. Bityutskiy 
1221daba5cc4SArtem B. Bityutskiy 	c->sector_size = 8 * c->mtd->erasesize;
1222daba5cc4SArtem B. Bityutskiy 
1223daba5cc4SArtem B. Bityutskiy 	while (c->sector_size < 8192) {
1224daba5cc4SArtem B. Bityutskiy 		c->sector_size *= 2;
1225daba5cc4SArtem B. Bityutskiy 	}
1226daba5cc4SArtem B. Bityutskiy 
1227daba5cc4SArtem B. Bityutskiy 	/* It may be necessary to adjust the flash size */
1228daba5cc4SArtem B. Bityutskiy 	c->flash_size = c->mtd->size;
1229daba5cc4SArtem B. Bityutskiy 
1230daba5cc4SArtem B. Bityutskiy 	if ((c->flash_size % c->sector_size) != 0) {
1231daba5cc4SArtem B. Bityutskiy 		c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
1232daba5cc4SArtem B. Bityutskiy 		printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size / 1024);
1233daba5cc4SArtem B. Bityutskiy 	}
1234daba5cc4SArtem B. Bityutskiy 
1235daba5cc4SArtem B. Bityutskiy 	c->wbuf_ofs = 0xFFFFFFFF;
12368f15fd55SAndrew Victor 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
12378f15fd55SAndrew Victor 	if (!c->wbuf)
12388f15fd55SAndrew Victor 		return -ENOMEM;
12398f15fd55SAndrew Victor 
1240cca15841Smichael #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1241cca15841Smichael 	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1242cca15841Smichael 	if (!c->wbuf_verify) {
1243cca15841Smichael 		kfree(c->oobbuf);
1244cca15841Smichael 		kfree(c->wbuf);
1245cca15841Smichael 		return -ENOMEM;
1246cca15841Smichael 	}
1247cca15841Smichael #endif
1248cca15841Smichael 
1249daba5cc4SArtem B. Bityutskiy 	printk(KERN_INFO "JFFS2 write-buffering enabled, buffer size %d, erase size %d\n", c->wbuf_pagesize, c->sector_size);
12508f15fd55SAndrew Victor 
12518f15fd55SAndrew Victor 	return 0;
12528f15fd55SAndrew Victor }
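/*
 * A stand-alone sketch (hypothetical values) of the sector_size derivation
 * above for the two known DataFlash page sizes: group eight eraseblocks and
 * keep doubling until the virtual sector is at least 8 KiB.
 */
#include <stdio.h>

static unsigned int dataflash_sector_size(unsigned int erasesize)
{
	unsigned int sector_size = 8 * erasesize;

	while (sector_size < 8192)
		sector_size *= 2;
	return sector_size;
}

int main(void)
{
	printf("528  -> %u\n", dataflash_sector_size(528));	/* 8 * 528 = 4224, doubled once: 8448 */
	printf("1056 -> %u\n", dataflash_sector_size(1056));	/* 8 * 1056 = 8448, already >= 8192 */
	return 0;
}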
12538f15fd55SAndrew Victor 
12548f15fd55SAndrew Victor void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1255cca15841Smichael #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1256cca15841Smichael 	kfree(c->wbuf_verify);
1257cca15841Smichael #endif
12588f15fd55SAndrew Victor 	kfree(c->wbuf);
12598f15fd55SAndrew Victor }
12608f15fd55SAndrew Victor 
126159da721aSNicolas Pitre int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1262c8b229deSJoern Engel 	/* Cleanmarker currently occupies whole programming regions:
1263c8b229deSJoern Engel 	 * either one region, or two on STMicro flashes with 8-byte regions. */
1264c8b229deSJoern Engel 	c->cleanmarker_size = max(16u, c->mtd->writesize);
126559da721aSNicolas Pitre 
126659da721aSNicolas Pitre 	/* Initialize write buffer */
126759da721aSNicolas Pitre 	init_rwsem(&c->wbuf_sem);
126828318776SJoern Engel 	c->wbuf_pagesize = c->mtd->writesize;
126959da721aSNicolas Pitre 	c->wbuf_ofs = 0xFFFFFFFF;
127059da721aSNicolas Pitre 
127159da721aSNicolas Pitre 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
127259da721aSNicolas Pitre 	if (!c->wbuf)
127359da721aSNicolas Pitre 		return -ENOMEM;
127459da721aSNicolas Pitre 
1275bc8cec0dSMassimo Cirillo #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1276bc8cec0dSMassimo Cirillo 	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1277bc8cec0dSMassimo Cirillo 	if (!c->wbuf_verify) {
1278bc8cec0dSMassimo Cirillo 		kfree(c->wbuf);
1279bc8cec0dSMassimo Cirillo 		return -ENOMEM;
1280bc8cec0dSMassimo Cirillo 	}
1281bc8cec0dSMassimo Cirillo #endif
128259da721aSNicolas Pitre 	return 0;
128359da721aSNicolas Pitre }
128459da721aSNicolas Pitre 
128559da721aSNicolas Pitre void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1286bc8cec0dSMassimo Cirillo #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1287bc8cec0dSMassimo Cirillo 	kfree(c->wbuf_verify);
1288bc8cec0dSMassimo Cirillo #endif
128959da721aSNicolas Pitre 	kfree(c->wbuf);
129059da721aSNicolas Pitre }
12910029da3bSArtem Bityutskiy 
12920029da3bSArtem Bityutskiy int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
12930029da3bSArtem Bityutskiy 	c->cleanmarker_size = 0;
12940029da3bSArtem Bityutskiy 
12950029da3bSArtem Bityutskiy 	if (c->mtd->writesize == 1)
12960029da3bSArtem Bityutskiy 		/* We do not need write-buffer */
12970029da3bSArtem Bityutskiy 		return 0;
12980029da3bSArtem Bityutskiy 
12990029da3bSArtem Bityutskiy 	init_rwsem(&c->wbuf_sem);
13000029da3bSArtem Bityutskiy 
13010029da3bSArtem Bityutskiy 	c->wbuf_pagesize =  c->mtd->writesize;
13020029da3bSArtem Bityutskiy 	c->wbuf_ofs = 0xFFFFFFFF;
13030029da3bSArtem Bityutskiy 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
13040029da3bSArtem Bityutskiy 	if (!c->wbuf)
13050029da3bSArtem Bityutskiy 		return -ENOMEM;
13060029da3bSArtem Bityutskiy 
13070029da3bSArtem Bityutskiy 	printk(KERN_INFO "JFFS2 write-buffering enabled, buffer size %d, erase size %d\n", c->wbuf_pagesize, c->sector_size);
13080029da3bSArtem Bityutskiy 
13090029da3bSArtem Bityutskiy 	return 0;
13100029da3bSArtem Bityutskiy }
13110029da3bSArtem Bityutskiy 
13120029da3bSArtem Bityutskiy void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) {
13130029da3bSArtem Bityutskiy 	kfree(c->wbuf);
13140029da3bSArtem Bityutskiy }
1315