xref: /openbmc/linux/fs/jffs2/wbuf.c (revision da320f05)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * JFFS2 -- Journalling Flash File System, Version 2.
31da177e4SLinus Torvalds  *
4c00c310eSDavid Woodhouse  * Copyright © 2001-2007 Red Hat, Inc.
5c00c310eSDavid Woodhouse  * Copyright © 2004 Thomas Gleixner <tglx@linutronix.de>
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  * Created by David Woodhouse <dwmw2@infradead.org>
81da177e4SLinus Torvalds  * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
91da177e4SLinus Torvalds  *
101da177e4SLinus Torvalds  * For licensing information, see the file 'LICENCE' in this directory.
111da177e4SLinus Torvalds  *
121da177e4SLinus Torvalds  */
131da177e4SLinus Torvalds 
141da177e4SLinus Torvalds #include <linux/kernel.h>
151da177e4SLinus Torvalds #include <linux/slab.h>
161da177e4SLinus Torvalds #include <linux/mtd/mtd.h>
171da177e4SLinus Torvalds #include <linux/crc32.h>
181da177e4SLinus Torvalds #include <linux/mtd/nand.h>
194e57b681STim Schmielau #include <linux/jiffies.h>
20914e2637SAl Viro #include <linux/sched.h>
214e57b681STim Schmielau 
221da177e4SLinus Torvalds #include "nodelist.h"
231da177e4SLinus Torvalds 
241da177e4SLinus Torvalds /* For testing write failures */
251da177e4SLinus Torvalds #undef BREAKME
261da177e4SLinus Torvalds #undef BREAKMEHEADER
271da177e4SLinus Torvalds 
281da177e4SLinus Torvalds #ifdef BREAKME
291da177e4SLinus Torvalds static unsigned char *brokenbuf;
301da177e4SLinus Torvalds #endif
311da177e4SLinus Torvalds 
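/*
 * PAGE_DIV() rounds an address down to the start of the write-buffer page
 * that contains it; PAGE_MOD() gives the offset within that page. With a
 * wbuf_pagesize of 512 (0x200), for instance, PAGE_DIV(0x1234) is 0x1200
 * and PAGE_MOD(0x1234) is 0x34. Both macros rely on a local 'c' (the
 * struct jffs2_sb_info *) being in scope.
 */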
32daba5cc4SArtem B. Bityutskiy #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
33daba5cc4SArtem B. Bityutskiy #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
34daba5cc4SArtem B. Bityutskiy 
351da177e4SLinus Torvalds /* max. erase failures before we mark a block bad */
361da177e4SLinus Torvalds #define MAX_ERASE_FAILURES 	2
371da177e4SLinus Torvalds 
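/*
 * Each jffs2_inodirty entry records an inode number which still has data
 * sitting in the write buffer. The statically allocated 'inodirty_nomem'
 * below acts as a sentinel: if allocating a list entry fails, the list is
 * replaced by this marker and every inode is treated as dirty.
 */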
381da177e4SLinus Torvalds struct jffs2_inodirty {
391da177e4SLinus Torvalds 	uint32_t ino;
401da177e4SLinus Torvalds 	struct jffs2_inodirty *next;
411da177e4SLinus Torvalds };
421da177e4SLinus Torvalds 
431da177e4SLinus Torvalds static struct jffs2_inodirty inodirty_nomem;
441da177e4SLinus Torvalds 
451da177e4SLinus Torvalds static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
461da177e4SLinus Torvalds {
471da177e4SLinus Torvalds 	struct jffs2_inodirty *this = c->wbuf_inodes;
481da177e4SLinus Torvalds 
491da177e4SLinus Torvalds 	/* If a malloc failed, consider _everything_ dirty */
501da177e4SLinus Torvalds 	if (this == &inodirty_nomem)
511da177e4SLinus Torvalds 		return 1;
521da177e4SLinus Torvalds 
531da177e4SLinus Torvalds 	/* If ino == 0, _any_ non-GC writes mean 'yes' */
541da177e4SLinus Torvalds 	if (this && !ino)
551da177e4SLinus Torvalds 		return 1;
561da177e4SLinus Torvalds 
571da177e4SLinus Torvalds 	/* Look to see if the inode in question is pending in the wbuf */
581da177e4SLinus Torvalds 	while (this) {
591da177e4SLinus Torvalds 		if (this->ino == ino)
601da177e4SLinus Torvalds 			return 1;
611da177e4SLinus Torvalds 		this = this->next;
621da177e4SLinus Torvalds 	}
631da177e4SLinus Torvalds 	return 0;
641da177e4SLinus Torvalds }
651da177e4SLinus Torvalds 
661da177e4SLinus Torvalds static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
671da177e4SLinus Torvalds {
681da177e4SLinus Torvalds 	struct jffs2_inodirty *this;
691da177e4SLinus Torvalds 
701da177e4SLinus Torvalds 	this = c->wbuf_inodes;
711da177e4SLinus Torvalds 
721da177e4SLinus Torvalds 	if (this != &inodirty_nomem) {
731da177e4SLinus Torvalds 		while (this) {
741da177e4SLinus Torvalds 			struct jffs2_inodirty *next = this->next;
751da177e4SLinus Torvalds 			kfree(this);
761da177e4SLinus Torvalds 			this = next;
771da177e4SLinus Torvalds 		}
781da177e4SLinus Torvalds 	}
791da177e4SLinus Torvalds 	c->wbuf_inodes = NULL;
801da177e4SLinus Torvalds }
811da177e4SLinus Torvalds 
821da177e4SLinus Torvalds static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
831da177e4SLinus Torvalds {
841da177e4SLinus Torvalds 	struct jffs2_inodirty *new;
851da177e4SLinus Torvalds 
861da177e4SLinus Torvalds 	/* Mark the superblock dirty so that kupdated will flush... */
8764a5c2ebSJoakim Tjernlund 	jffs2_dirty_trigger(c);
881da177e4SLinus Torvalds 
891da177e4SLinus Torvalds 	if (jffs2_wbuf_pending_for_ino(c, ino))
901da177e4SLinus Torvalds 		return;
911da177e4SLinus Torvalds 
921da177e4SLinus Torvalds 	new = kmalloc(sizeof(*new), GFP_KERNEL);
931da177e4SLinus Torvalds 	if (!new) {
949c261b33SJoe Perches 		jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
951da177e4SLinus Torvalds 		jffs2_clear_wbuf_ino_list(c);
961da177e4SLinus Torvalds 		c->wbuf_inodes = &inodirty_nomem;
971da177e4SLinus Torvalds 		return;
981da177e4SLinus Torvalds 	}
991da177e4SLinus Torvalds 	new->ino = ino;
1001da177e4SLinus Torvalds 	new->next = c->wbuf_inodes;
1011da177e4SLinus Torvalds 	c->wbuf_inodes = new;
1021da177e4SLinus Torvalds 	return;
1031da177e4SLinus Torvalds }
1041da177e4SLinus Torvalds 
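/*
 * Blocks on erasable_pending_wbuf_list were waiting only for the write
 * buffer to be flushed before they could be erased. Now that the flush has
 * happened, move them on: usually straight to erase_pending_list, but
 * occasionally (roughly one time in 128) to erasable_list so the erase
 * load is spread out a little. Called with c->erase_completion_lock held.
 */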
1051da177e4SLinus Torvalds static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
1061da177e4SLinus Torvalds {
1071da177e4SLinus Torvalds 	struct list_head *this, *next;
1081da177e4SLinus Torvalds 	static int n;
1091da177e4SLinus Torvalds 
1101da177e4SLinus Torvalds 	if (list_empty(&c->erasable_pending_wbuf_list))
1111da177e4SLinus Torvalds 		return;
1121da177e4SLinus Torvalds 
1131da177e4SLinus Torvalds 	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
1141da177e4SLinus Torvalds 		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
1151da177e4SLinus Torvalds 
1169c261b33SJoe Perches 		jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
1179c261b33SJoe Perches 			  jeb->offset);
1181da177e4SLinus Torvalds 		list_del(this);
1191da177e4SLinus Torvalds 		if ((jiffies + (n++)) & 127) {
1201da177e4SLinus Torvalds 			/* Most of the time, we just erase it immediately. Otherwise we
1211da177e4SLinus Torvalds 			   spend ages scanning it on mount, etc. */
1229c261b33SJoe Perches 			jffs2_dbg(1, "...and adding to erase_pending_list\n");
1231da177e4SLinus Torvalds 			list_add_tail(&jeb->list, &c->erase_pending_list);
1241da177e4SLinus Torvalds 			c->nr_erasing_blocks++;
125ae3b6ba0SDavid Woodhouse 			jffs2_garbage_collect_trigger(c);
1261da177e4SLinus Torvalds 		} else {
1271da177e4SLinus Torvalds 			/* Sometimes, however, we leave it elsewhere so it doesn't get
1281da177e4SLinus Torvalds 			   immediately reused, and we spread the load a bit. */
1299c261b33SJoe Perches 			jffs2_dbg(1, "...and adding to erasable_list\n");
1301da177e4SLinus Torvalds 			list_add_tail(&jeb->list, &c->erasable_list);
1311da177e4SLinus Torvalds 		}
1321da177e4SLinus Torvalds 	}
1331da177e4SLinus Torvalds }
1341da177e4SLinus Torvalds 
1357f716cf3SEstelle Hammache #define REFILE_NOTEMPTY 0
1367f716cf3SEstelle Hammache #define REFILE_ANYWAY   1
1377f716cf3SEstelle Hammache 
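/*
 * Refile a block on which a write just failed. If the block already holds
 * nodes it goes on bad_used_list; an empty block (only permitted when
 * 'allow_empty' is REFILE_ANYWAY) goes straight back to erase_pending_list.
 * Any remaining free space in the block is linked in as an obsolete ref and
 * accounted as wasted. Called with c->erase_completion_lock held.
 */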
1387f716cf3SEstelle Hammache static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
1391da177e4SLinus Torvalds {
1409c261b33SJoe Perches 	jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);
1411da177e4SLinus Torvalds 
1421da177e4SLinus Torvalds 	/* File the existing block on the bad_used_list.... */
1431da177e4SLinus Torvalds 	if (c->nextblock == jeb)
1441da177e4SLinus Torvalds 		c->nextblock = NULL;
1451da177e4SLinus Torvalds 	else /* Not sure this should ever happen... need more coffee */
1461da177e4SLinus Torvalds 		list_del(&jeb->list);
1471da177e4SLinus Torvalds 	if (jeb->first_node) {
1489c261b33SJoe Perches 		jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
1499c261b33SJoe Perches 			  jeb->offset);
1501da177e4SLinus Torvalds 		list_add(&jeb->list, &c->bad_used_list);
1511da177e4SLinus Torvalds 	} else {
1529b88f473SEstelle Hammache 		BUG_ON(allow_empty == REFILE_NOTEMPTY);
1531da177e4SLinus Torvalds 		/* It has to have had some nodes or we couldn't be here */
1549c261b33SJoe Perches 		jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
1559c261b33SJoe Perches 			  jeb->offset);
1561da177e4SLinus Torvalds 		list_add(&jeb->list, &c->erase_pending_list);
1571da177e4SLinus Torvalds 		c->nr_erasing_blocks++;
158ae3b6ba0SDavid Woodhouse 		jffs2_garbage_collect_trigger(c);
1591da177e4SLinus Torvalds 	}
1601da177e4SLinus Torvalds 
1619bfeb691SDavid Woodhouse 	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
1629bfeb691SDavid Woodhouse 		uint32_t oldfree = jeb->free_size;
1639bfeb691SDavid Woodhouse 
1649bfeb691SDavid Woodhouse 		jffs2_link_node_ref(c, jeb,
1659bfeb691SDavid Woodhouse 				    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
1669bfeb691SDavid Woodhouse 				    oldfree, NULL);
1679bfeb691SDavid Woodhouse 		/* convert to wasted */
1689bfeb691SDavid Woodhouse 		c->wasted_size += oldfree;
1699bfeb691SDavid Woodhouse 		jeb->wasted_size += oldfree;
1709bfeb691SDavid Woodhouse 		c->dirty_size -= oldfree;
1719bfeb691SDavid Woodhouse 		jeb->dirty_size -= oldfree;
1729bfeb691SDavid Woodhouse 	}
1731da177e4SLinus Torvalds 
174e0c8e42fSArtem B. Bityutskiy 	jffs2_dbg_dump_block_lists_nolock(c);
175e0c8e42fSArtem B. Bityutskiy 	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
176e0c8e42fSArtem B. Bityutskiy 	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
1771da177e4SLinus Torvalds }
1781da177e4SLinus Torvalds 
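/*
 * Find the in-core pointer which references the raw node that is about to
 * be relocated, so the recovery code can later point it at the new copy:
 * either f->metadata->raw, the ->raw of the full_dnode hanging off the
 * matching fragtree entry, or the ->raw of the matching full_dirent.
 * Returns NULL for node types we do not track in-core.
 */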
1799bfeb691SDavid Woodhouse static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
1809bfeb691SDavid Woodhouse 							    struct jffs2_inode_info *f,
1819bfeb691SDavid Woodhouse 							    struct jffs2_raw_node_ref *raw,
1829bfeb691SDavid Woodhouse 							    union jffs2_node_union *node)
1839bfeb691SDavid Woodhouse {
1849bfeb691SDavid Woodhouse 	struct jffs2_node_frag *frag;
1859bfeb691SDavid Woodhouse 	struct jffs2_full_dirent *fd;
1869bfeb691SDavid Woodhouse 
1879bfeb691SDavid Woodhouse 	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
1889bfeb691SDavid Woodhouse 		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));
1899bfeb691SDavid Woodhouse 
1909bfeb691SDavid Woodhouse 	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
1919bfeb691SDavid Woodhouse 	       je16_to_cpu(node->u.magic) != 0);
1929bfeb691SDavid Woodhouse 
1939bfeb691SDavid Woodhouse 	switch (je16_to_cpu(node->u.nodetype)) {
1949bfeb691SDavid Woodhouse 	case JFFS2_NODETYPE_INODE:
195ddc58bd6SDavid Woodhouse 		if (f->metadata && f->metadata->raw == raw) {
196ddc58bd6SDavid Woodhouse 			dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
197ddc58bd6SDavid Woodhouse 			return &f->metadata->raw;
198ddc58bd6SDavid Woodhouse 		}
1999bfeb691SDavid Woodhouse 		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
2009bfeb691SDavid Woodhouse 		BUG_ON(!frag);
2019bfeb691SDavid Woodhouse 		/* Find a frag which refers to the full_dnode we want to modify */
2029bfeb691SDavid Woodhouse 		while (!frag->node || frag->node->raw != raw) {
2039bfeb691SDavid Woodhouse 			frag = frag_next(frag);
2049bfeb691SDavid Woodhouse 			BUG_ON(!frag);
2059bfeb691SDavid Woodhouse 		}
2069bfeb691SDavid Woodhouse 		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
2079bfeb691SDavid Woodhouse 		return &frag->node->raw;
2089bfeb691SDavid Woodhouse 
2099bfeb691SDavid Woodhouse 	case JFFS2_NODETYPE_DIRENT:
2109bfeb691SDavid Woodhouse 		for (fd = f->dents; fd; fd = fd->next) {
2119bfeb691SDavid Woodhouse 			if (fd->raw == raw) {
2129bfeb691SDavid Woodhouse 				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
2139bfeb691SDavid Woodhouse 				return &fd->raw;
2149bfeb691SDavid Woodhouse 			}
2159bfeb691SDavid Woodhouse 		}
2169bfeb691SDavid Woodhouse 		BUG();
217ddc58bd6SDavid Woodhouse 
2189bfeb691SDavid Woodhouse 	default:
2199bfeb691SDavid Woodhouse 		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
2209bfeb691SDavid Woodhouse 			    je16_to_cpu(node->u.nodetype));
2219bfeb691SDavid Woodhouse 		break;
2229bfeb691SDavid Woodhouse 	}
2239bfeb691SDavid Woodhouse 	return NULL;
2249bfeb691SDavid Woodhouse }
2259bfeb691SDavid Woodhouse 
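/*
 * With CONFIG_JFFS2_FS_WBUF_VERIFY enabled, the page just written is read
 * back into c->wbuf_verify and compared with the data we intended to write;
 * a mismatch is reported (together with the ECC status of the read-back)
 * and treated as a write failure. Otherwise the check compiles away to 0.
 */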
226a6bc432eSDavid Woodhouse #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
227a6bc432eSDavid Woodhouse static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
228a6bc432eSDavid Woodhouse 			      uint32_t ofs)
229a6bc432eSDavid Woodhouse {
230a6bc432eSDavid Woodhouse 	int ret;
231a6bc432eSDavid Woodhouse 	size_t retlen;
232a6bc432eSDavid Woodhouse 	char *eccstr;
233a6bc432eSDavid Woodhouse 
234329ad399SArtem Bityutskiy 	ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
235a6bc432eSDavid Woodhouse 	if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
236da320f05SJoe Perches 		pr_warn("%s(): Read back of page at %08x failed: %d\n",
237da320f05SJoe Perches 			__func__, c->wbuf_ofs, ret);
238a6bc432eSDavid Woodhouse 		return ret;
239a6bc432eSDavid Woodhouse 	} else if (retlen != c->wbuf_pagesize) {
240da320f05SJoe Perches 		pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n",
241da320f05SJoe Perches 			__func__, ofs, retlen, c->wbuf_pagesize);
242a6bc432eSDavid Woodhouse 		return -EIO;
243a6bc432eSDavid Woodhouse 	}
244a6bc432eSDavid Woodhouse 	if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
245a6bc432eSDavid Woodhouse 		return 0;
246a6bc432eSDavid Woodhouse 
247a6bc432eSDavid Woodhouse 	if (ret == -EUCLEAN)
248a6bc432eSDavid Woodhouse 		eccstr = "corrected";
249a6bc432eSDavid Woodhouse 	else if (ret == -EBADMSG)
250a6bc432eSDavid Woodhouse 		eccstr = "correction failed";
251a6bc432eSDavid Woodhouse 	else
252a6bc432eSDavid Woodhouse 		eccstr = "OK or unused";
253a6bc432eSDavid Woodhouse 
254da320f05SJoe Perches 	pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n",
255a6bc432eSDavid Woodhouse 		eccstr, c->wbuf_ofs);
256a6bc432eSDavid Woodhouse 	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
257a6bc432eSDavid Woodhouse 		       c->wbuf, c->wbuf_pagesize, 0);
258a6bc432eSDavid Woodhouse 
259da320f05SJoe Perches 	pr_warn("Read back:\n");
260a6bc432eSDavid Woodhouse 	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
261a6bc432eSDavid Woodhouse 		       c->wbuf_verify, c->wbuf_pagesize, 0);
262a6bc432eSDavid Woodhouse 
263a6bc432eSDavid Woodhouse 	return -EIO;
264a6bc432eSDavid Woodhouse }
265a6bc432eSDavid Woodhouse #else
266a6bc432eSDavid Woodhouse #define jffs2_verify_write(c,b,o) (0)
267a6bc432eSDavid Woodhouse #endif
268a6bc432eSDavid Woodhouse 
2691da177e4SLinus Torvalds /* Recover from failure to write wbuf. Recover the nodes up to the
2701da177e4SLinus Torvalds  * wbuf, not the one which we were starting to try to write. */
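/* Roughly: the failing block is refiled, the nodes not yet safely on flash
 * are located (re-reading any partially written data from the medium),
 * space is reserved in a fresh block, the data is rewritten there, and
 * every raw_node_ref plus any in-core ->raw pointer is moved over to the
 * new location. */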
2711da177e4SLinus Torvalds 
2721da177e4SLinus Torvalds static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
2731da177e4SLinus Torvalds {
2741da177e4SLinus Torvalds 	struct jffs2_eraseblock *jeb, *new_jeb;
2759bfeb691SDavid Woodhouse 	struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
2761da177e4SLinus Torvalds 	size_t retlen;
2771da177e4SLinus Torvalds 	int ret;
2789bfeb691SDavid Woodhouse 	int nr_refile = 0;
2791da177e4SLinus Torvalds 	unsigned char *buf;
2801da177e4SLinus Torvalds 	uint32_t start, end, ofs, len;
2811da177e4SLinus Torvalds 
282046b8b98SDavid Woodhouse 	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
283046b8b98SDavid Woodhouse 
2841da177e4SLinus Torvalds 	spin_lock(&c->erase_completion_lock);
285180bfb31SVitaly Wool 	if (c->wbuf_ofs % c->mtd->erasesize)
2867f716cf3SEstelle Hammache 		jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
287180bfb31SVitaly Wool 	else
288180bfb31SVitaly Wool 		jffs2_block_refile(c, jeb, REFILE_ANYWAY);
2899bfeb691SDavid Woodhouse 	spin_unlock(&c->erase_completion_lock);
2909bfeb691SDavid Woodhouse 
2919bfeb691SDavid Woodhouse 	BUG_ON(!ref_obsolete(jeb->last_node));
2921da177e4SLinus Torvalds 
2931da177e4SLinus Torvalds 	/* Find the first node to be recovered, by skipping over every
2941da177e4SLinus Torvalds 	   node which ends before the wbuf starts, or which is obsolete. */
2959bfeb691SDavid Woodhouse 	for (next = raw = jeb->first_node; next; raw = next) {
2969bfeb691SDavid Woodhouse 		next = ref_next(raw);
2979bfeb691SDavid Woodhouse 
2989bfeb691SDavid Woodhouse 		if (ref_obsolete(raw) ||
2999bfeb691SDavid Woodhouse 		    (next && ref_offset(next) <= c->wbuf_ofs)) {
3009bfeb691SDavid Woodhouse 			dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
3019bfeb691SDavid Woodhouse 				    ref_offset(raw), ref_flags(raw),
3029bfeb691SDavid Woodhouse 				    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
3039bfeb691SDavid Woodhouse 				    c->wbuf_ofs);
3049bfeb691SDavid Woodhouse 			continue;
3059bfeb691SDavid Woodhouse 		}
3069bfeb691SDavid Woodhouse 		dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
3079bfeb691SDavid Woodhouse 			    ref_offset(raw), ref_flags(raw),
3089bfeb691SDavid Woodhouse 			    (ref_offset(raw) + ref_totlen(c, jeb, raw)));
3099bfeb691SDavid Woodhouse 
3109bfeb691SDavid Woodhouse 		first_raw = raw;
3119bfeb691SDavid Woodhouse 		break;
3121da177e4SLinus Torvalds 	}
3131da177e4SLinus Torvalds 
3149bfeb691SDavid Woodhouse 	if (!first_raw) {
3151da177e4SLinus Torvalds 		/* All nodes were obsolete. Nothing to recover. */
3169c261b33SJoe Perches 		jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
3179bfeb691SDavid Woodhouse 		c->wbuf_len = 0;
3181da177e4SLinus Torvalds 		return;
3191da177e4SLinus Torvalds 	}
3201da177e4SLinus Torvalds 
3219bfeb691SDavid Woodhouse 	start = ref_offset(first_raw);
3229bfeb691SDavid Woodhouse 	end = ref_offset(jeb->last_node);
3239bfeb691SDavid Woodhouse 	nr_refile = 1;
3241da177e4SLinus Torvalds 
3259bfeb691SDavid Woodhouse 	/* Count the number of refs which need to be copied */
3269bfeb691SDavid Woodhouse 	while ((raw = ref_next(raw)) != jeb->last_node)
3279bfeb691SDavid Woodhouse 		nr_refile++;
3281da177e4SLinus Torvalds 
3299bfeb691SDavid Woodhouse 	dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
3309bfeb691SDavid Woodhouse 		    start, end, end - start, nr_refile);
3311da177e4SLinus Torvalds 
3321da177e4SLinus Torvalds 	buf = NULL;
3331da177e4SLinus Torvalds 	if (start < c->wbuf_ofs) {
3341da177e4SLinus Torvalds 		/* First affected node was already partially written.
3351da177e4SLinus Torvalds 		 * Attempt to reread the old data into our buffer. */
3361da177e4SLinus Torvalds 
3371da177e4SLinus Torvalds 		buf = kmalloc(end - start, GFP_KERNEL);
3381da177e4SLinus Torvalds 		if (!buf) {
339da320f05SJoe Perches 			pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n");
3401da177e4SLinus Torvalds 
3411da177e4SLinus Torvalds 			goto read_failed;
3421da177e4SLinus Torvalds 		}
3431da177e4SLinus Torvalds 
3441da177e4SLinus Torvalds 		/* Do the read... */
345329ad399SArtem Bityutskiy 		ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen,
346329ad399SArtem Bityutskiy 			       buf);
3471da177e4SLinus Torvalds 
3489a1fcdfdSThomas Gleixner 		/* ECC recovered ? */
3499a1fcdfdSThomas Gleixner 		if ((ret == -EUCLEAN || ret == -EBADMSG) &&
3509a1fcdfdSThomas Gleixner 		    (retlen == c->wbuf_ofs - start))
3511da177e4SLinus Torvalds 			ret = 0;
3529a1fcdfdSThomas Gleixner 
3531da177e4SLinus Torvalds 		if (ret || retlen != c->wbuf_ofs - start) {
354da320f05SJoe Perches 			pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n");
3551da177e4SLinus Torvalds 
3561da177e4SLinus Torvalds 			kfree(buf);
3571da177e4SLinus Torvalds 			buf = NULL;
3581da177e4SLinus Torvalds 		read_failed:
3599bfeb691SDavid Woodhouse 			first_raw = ref_next(first_raw);
3609bfeb691SDavid Woodhouse 			nr_refile--;
3619bfeb691SDavid Woodhouse 			while (first_raw && ref_obsolete(first_raw)) {
3629bfeb691SDavid Woodhouse 				first_raw = ref_next(first_raw);
3639bfeb691SDavid Woodhouse 				nr_refile--;
3649bfeb691SDavid Woodhouse 			}
3659bfeb691SDavid Woodhouse 
3661da177e4SLinus Torvalds 			/* If this was the only node to be recovered, give up */
3679bfeb691SDavid Woodhouse 			if (!first_raw) {
3689bfeb691SDavid Woodhouse 				c->wbuf_len = 0;
3691da177e4SLinus Torvalds 				return;
3709bfeb691SDavid Woodhouse 			}
3711da177e4SLinus Torvalds 
3721da177e4SLinus Torvalds 			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
3739bfeb691SDavid Woodhouse 			start = ref_offset(first_raw);
3749bfeb691SDavid Woodhouse 			dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
3759bfeb691SDavid Woodhouse 				    start, end, end - start, nr_refile);
3769bfeb691SDavid Woodhouse 
3771da177e4SLinus Torvalds 		} else {
3781da177e4SLinus Torvalds 			/* Read succeeded. Copy the remaining data from the wbuf */
3791da177e4SLinus Torvalds 			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
3801da177e4SLinus Torvalds 		}
3811da177e4SLinus Torvalds 	}
3821da177e4SLinus Torvalds 	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
3831da177e4SLinus Torvalds 	   Either 'buf' contains the data, or we find it in the wbuf */
3841da177e4SLinus Torvalds 
3851da177e4SLinus Torvalds 	/* ... and get an allocation of space from a shiny new block instead */
3869fe4854cSDavid Woodhouse 	ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
3871da177e4SLinus Torvalds 	if (ret) {
388da320f05SJoe Perches 		pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n");
3891da177e4SLinus Torvalds 		kfree(buf);
3901da177e4SLinus Torvalds 		return;
3911da177e4SLinus Torvalds 	}
3929bfeb691SDavid Woodhouse 
3937f762ab2SAdrian Hunter 	/* The summary is not recovered, so it must be disabled for this erase block */
3947f762ab2SAdrian Hunter 	jffs2_sum_disable_collecting(c->summary);
3957f762ab2SAdrian Hunter 
3969bfeb691SDavid Woodhouse 	ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
3979bfeb691SDavid Woodhouse 	if (ret) {
398da320f05SJoe Perches 		pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
3999bfeb691SDavid Woodhouse 		kfree(buf);
4009bfeb691SDavid Woodhouse 		return;
4019bfeb691SDavid Woodhouse 	}
4029bfeb691SDavid Woodhouse 
4039fe4854cSDavid Woodhouse 	ofs = write_ofs(c);
4049fe4854cSDavid Woodhouse 
4051da177e4SLinus Torvalds 	if (end-start >= c->wbuf_pagesize) {
4067f716cf3SEstelle Hammache 		/* Need to do another write immediately, but it's possible
4077f716cf3SEstelle Hammache 		   that this is just because the wbuf itself is completely
4087f716cf3SEstelle Hammache 		   full, and there's nothing earlier read back from the
4097f716cf3SEstelle Hammache 		   flash. Hence 'buf' isn't necessarily what we're writing
4107f716cf3SEstelle Hammache 		   from. */
4117f716cf3SEstelle Hammache 		unsigned char *rewrite_buf = buf?:c->wbuf;
4121da177e4SLinus Torvalds 		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
4131da177e4SLinus Torvalds 
4149c261b33SJoe Perches 		jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
4159c261b33SJoe Perches 			  towrite, ofs);
4161da177e4SLinus Torvalds 
4171da177e4SLinus Torvalds #ifdef BREAKMEHEADER
4181da177e4SLinus Torvalds 		static int breakme;
4191da177e4SLinus Torvalds 		if (breakme++ == 20) {
420da320f05SJoe Perches 			pr_notice("Faking write error at 0x%08x\n", ofs);
4211da177e4SLinus Torvalds 			breakme = 0;
422eda95cbfSArtem Bityutskiy 			mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
4231da177e4SLinus Torvalds 			ret = -EIO;
4241da177e4SLinus Torvalds 		} else
4251da177e4SLinus Torvalds #endif
426eda95cbfSArtem Bityutskiy 			ret = mtd_write(c->mtd, ofs, towrite, &retlen,
4279223a456SThomas Gleixner 					rewrite_buf);
4281da177e4SLinus Torvalds 
429a6bc432eSDavid Woodhouse 		if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
4301da177e4SLinus Torvalds 			/* Argh. We tried. Really we did. */
431da320f05SJoe Perches 			pr_crit("Recovery of wbuf failed due to a second write error\n");
4321da177e4SLinus Torvalds 			kfree(buf);
4331da177e4SLinus Torvalds 
4342f785402SDavid Woodhouse 			if (retlen)
4359bfeb691SDavid Woodhouse 				jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);
4361da177e4SLinus Torvalds 
4371da177e4SLinus Torvalds 			return;
4381da177e4SLinus Torvalds 		}
439da320f05SJoe Perches 		pr_notice("Recovery of wbuf succeeded to %08x\n", ofs);
4401da177e4SLinus Torvalds 
4411da177e4SLinus Torvalds 		c->wbuf_len = (end - start) - towrite;
4421da177e4SLinus Torvalds 		c->wbuf_ofs = ofs + towrite;
4437f716cf3SEstelle Hammache 		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
4441da177e4SLinus Torvalds 		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
4451da177e4SLinus Torvalds 	} else {
4461da177e4SLinus Torvalds 		/* OK, now we're left with the dregs in whichever buffer we're using */
4471da177e4SLinus Torvalds 		if (buf) {
4481da177e4SLinus Torvalds 			memcpy(c->wbuf, buf, end-start);
4491da177e4SLinus Torvalds 		} else {
4501da177e4SLinus Torvalds 			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
4511da177e4SLinus Torvalds 		}
4521da177e4SLinus Torvalds 		c->wbuf_ofs = ofs;
4531da177e4SLinus Torvalds 		c->wbuf_len = end - start;
4541da177e4SLinus Torvalds 	}
4551da177e4SLinus Torvalds 
4561da177e4SLinus Torvalds 	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
4571da177e4SLinus Torvalds 	new_jeb = &c->blocks[ofs / c->sector_size];
4581da177e4SLinus Torvalds 
4591da177e4SLinus Torvalds 	spin_lock(&c->erase_completion_lock);
4609bfeb691SDavid Woodhouse 	for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
4619bfeb691SDavid Woodhouse 		uint32_t rawlen = ref_totlen(c, jeb, raw);
4629bfeb691SDavid Woodhouse 		struct jffs2_inode_cache *ic;
4639bfeb691SDavid Woodhouse 		struct jffs2_raw_node_ref *new_ref;
4649bfeb691SDavid Woodhouse 		struct jffs2_raw_node_ref **adjust_ref = NULL;
4659bfeb691SDavid Woodhouse 		struct jffs2_inode_info *f = NULL;
4661da177e4SLinus Torvalds 
4679c261b33SJoe Perches 		jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
4689c261b33SJoe Perches 			  rawlen, ref_offset(raw), ref_flags(raw), ofs);
4691da177e4SLinus Torvalds 
4709bfeb691SDavid Woodhouse 		ic = jffs2_raw_ref_to_ic(raw);
4719bfeb691SDavid Woodhouse 
4729bfeb691SDavid Woodhouse 		/* Ick. This XATTR mess should be fixed shortly... */
4739bfeb691SDavid Woodhouse 		if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
4749bfeb691SDavid Woodhouse 			struct jffs2_xattr_datum *xd = (void *)ic;
4759bfeb691SDavid Woodhouse 			BUG_ON(xd->node != raw);
4769bfeb691SDavid Woodhouse 			adjust_ref = &xd->node;
4779bfeb691SDavid Woodhouse 			raw->next_in_ino = NULL;
4789bfeb691SDavid Woodhouse 			ic = NULL;
4799bfeb691SDavid Woodhouse 		} else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
4809bfeb691SDavid Woodhouse 			struct jffs2_xattr_ref *xr = (void *)ic;
4819bfeb691SDavid Woodhouse 			BUG_ON(xr->node != raw);
4829bfeb691SDavid Woodhouse 			adjust_ref = &xr->node;
4839bfeb691SDavid Woodhouse 			raw->next_in_ino = NULL;
4849bfeb691SDavid Woodhouse 			ic = NULL;
4859bfeb691SDavid Woodhouse 		} else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
4869bfeb691SDavid Woodhouse 			struct jffs2_raw_node_ref **p = &ic->nodes;
4879bfeb691SDavid Woodhouse 
4889bfeb691SDavid Woodhouse 			/* Remove the old node from the per-inode list */
4899bfeb691SDavid Woodhouse 			while (*p && *p != (void *)ic) {
4909bfeb691SDavid Woodhouse 				if (*p == raw) {
4919bfeb691SDavid Woodhouse 					(*p) = (raw->next_in_ino);
4929bfeb691SDavid Woodhouse 					raw->next_in_ino = NULL;
4939bfeb691SDavid Woodhouse 					break;
4949bfeb691SDavid Woodhouse 				}
4959bfeb691SDavid Woodhouse 				p = &((*p)->next_in_ino);
4969bfeb691SDavid Woodhouse 			}
4979bfeb691SDavid Woodhouse 
4989bfeb691SDavid Woodhouse 			if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
4999bfeb691SDavid Woodhouse 				/* If it's an in-core inode, then we have to adjust any
5009bfeb691SDavid Woodhouse 				   full_dirent or full_dnode structure to point to the
5019bfeb691SDavid Woodhouse 				   new version instead of the old */
50227c72b04SDavid Woodhouse 				f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink);
5039bfeb691SDavid Woodhouse 				if (IS_ERR(f)) {
5049bfeb691SDavid Woodhouse 					/* Should never happen; it _must_ be present */
5059bfeb691SDavid Woodhouse 					JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
5069bfeb691SDavid Woodhouse 						    ic->ino, PTR_ERR(f));
5079bfeb691SDavid Woodhouse 					BUG();
5089bfeb691SDavid Woodhouse 				}
5099bfeb691SDavid Woodhouse 				/* We don't lock f->sem. There's a number of ways we could
5109bfeb691SDavid Woodhouse 				   end up in here with it already being locked, and nobody's
5119bfeb691SDavid Woodhouse 				   going to modify it on us anyway because we hold the
5129bfeb691SDavid Woodhouse 				   alloc_sem. We're only changing one ->raw pointer too,
5139bfeb691SDavid Woodhouse 				   which we can get away with without upsetting readers. */
5149bfeb691SDavid Woodhouse 				adjust_ref = jffs2_incore_replace_raw(c, f, raw,
5159bfeb691SDavid Woodhouse 								      (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
5169bfeb691SDavid Woodhouse 			} else if (unlikely(ic->state != INO_STATE_PRESENT &&
5179bfeb691SDavid Woodhouse 					    ic->state != INO_STATE_CHECKEDABSENT &&
5189bfeb691SDavid Woodhouse 					    ic->state != INO_STATE_GC)) {
5199bfeb691SDavid Woodhouse 				JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
5209bfeb691SDavid Woodhouse 				BUG();
5219bfeb691SDavid Woodhouse 			}
5229bfeb691SDavid Woodhouse 		}
5239bfeb691SDavid Woodhouse 
5249bfeb691SDavid Woodhouse 		new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);
5259bfeb691SDavid Woodhouse 
5269bfeb691SDavid Woodhouse 		if (adjust_ref) {
5279bfeb691SDavid Woodhouse 			BUG_ON(*adjust_ref != raw);
5289bfeb691SDavid Woodhouse 			*adjust_ref = new_ref;
5299bfeb691SDavid Woodhouse 		}
5309bfeb691SDavid Woodhouse 		if (f)
5319bfeb691SDavid Woodhouse 			jffs2_gc_release_inode(c, f);
5329bfeb691SDavid Woodhouse 
5339bfeb691SDavid Woodhouse 		if (!ref_obsolete(raw)) {
5341da177e4SLinus Torvalds 			jeb->dirty_size += rawlen;
5351da177e4SLinus Torvalds 			jeb->used_size  -= rawlen;
5361da177e4SLinus Torvalds 			c->dirty_size += rawlen;
5379bfeb691SDavid Woodhouse 			c->used_size -= rawlen;
5389bfeb691SDavid Woodhouse 			raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
5399bfeb691SDavid Woodhouse 			BUG_ON(raw->next_in_ino);
5401da177e4SLinus Torvalds 		}
5411da177e4SLinus Torvalds 		ofs += rawlen;
5421da177e4SLinus Torvalds 	}
5431da177e4SLinus Torvalds 
5449bfeb691SDavid Woodhouse 	kfree(buf);
5459bfeb691SDavid Woodhouse 
5461da177e4SLinus Torvalds 	/* Fix up the original jeb now it's on the bad_list */
5479bfeb691SDavid Woodhouse 	if (first_raw == jeb->first_node) {
5489c261b33SJoe Perches 		jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
5499c261b33SJoe Perches 			  jeb->offset);
550f116629dSAkinobu Mita 		list_move(&jeb->list, &c->erase_pending_list);
5511da177e4SLinus Torvalds 		c->nr_erasing_blocks++;
552ae3b6ba0SDavid Woodhouse 		jffs2_garbage_collect_trigger(c);
5531da177e4SLinus Torvalds 	}
5541da177e4SLinus Torvalds 
555e0c8e42fSArtem B. Bityutskiy 	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
556e0c8e42fSArtem B. Bityutskiy 	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
5571da177e4SLinus Torvalds 
558e0c8e42fSArtem B. Bityutskiy 	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
559e0c8e42fSArtem B. Bityutskiy 	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
5601da177e4SLinus Torvalds 
5611da177e4SLinus Torvalds 	spin_unlock(&c->erase_completion_lock);
5621da177e4SLinus Torvalds 
5639c261b33SJoe Perches 	jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
5649c261b33SJoe Perches 		  c->wbuf_ofs, c->wbuf_len);
5659bfeb691SDavid Woodhouse 
5661da177e4SLinus Torvalds }
5671da177e4SLinus Torvalds 
5681da177e4SLinus Torvalds /* Meaning of pad argument:
5691da177e4SLinus Torvalds    0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
5701da177e4SLinus Torvalds    1: Pad, do not adjust nextblock free_size
5711da177e4SLinus Torvalds    2: Pad, adjust nextblock free_size
5721da177e4SLinus Torvalds */
5731da177e4SLinus Torvalds #define NOPAD		0
5741da177e4SLinus Torvalds #define PAD_NOACCOUNT	1
5751da177e4SLinus Torvalds #define PAD_ACCOUNTING	2
5761da177e4SLinus Torvalds 
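/*
 * Write out the current contents of the write buffer as one full
 * wbuf_pagesize page. If 'pad' is set, the unused tail of the page is
 * filled, with a JFFS2_NODETYPE_PADDING node where there is room for one,
 * and the padded bytes are accounted as wasted space. On a write or verify
 * failure, jffs2_wbuf_recover() is invoked. Expects c->alloc_sem to be
 * held (this is BUG-checked); callers hold c->wbuf_sem for writing.
 */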
5771da177e4SLinus Torvalds static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
5781da177e4SLinus Torvalds {
5799bfeb691SDavid Woodhouse 	struct jffs2_eraseblock *wbuf_jeb;
5801da177e4SLinus Torvalds 	int ret;
5811da177e4SLinus Torvalds 	size_t retlen;
5821da177e4SLinus Torvalds 
5833be36675SAndrew Victor 	/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
5841da177e4SLinus Torvalds 	   del_timer() the timer we never initialised. */
5853be36675SAndrew Victor 	if (!jffs2_is_writebuffered(c))
5861da177e4SLinus Torvalds 		return 0;
5871da177e4SLinus Torvalds 
58851b11e36SAlexey Khoroshilov 	if (!mutex_is_locked(&c->alloc_sem)) {
589da320f05SJoe Perches 		pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n");
5901da177e4SLinus Torvalds 		BUG();
5911da177e4SLinus Torvalds 	}
5921da177e4SLinus Torvalds 
5933be36675SAndrew Victor 	if (!c->wbuf_len)	/* already checked c->wbuf above */
5941da177e4SLinus Torvalds 		return 0;
5951da177e4SLinus Torvalds 
5969bfeb691SDavid Woodhouse 	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
5979bfeb691SDavid Woodhouse 	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
5982f785402SDavid Woodhouse 		return -ENOMEM;
5992f785402SDavid Woodhouse 
6001da177e4SLinus Torvalds 	/* Claim the remaining space on the page.
6011da177e4SLinus Torvalds 	   This happens if we have moved to a new block,
6021da177e4SLinus Torvalds 	   or if fsync forces us to flush the write buffer.
6031da177e4SLinus Torvalds 	   If we are switching to the next page, we will not have
6041da177e4SLinus Torvalds 	   enough remaining space for this.
6051da177e4SLinus Torvalds 	*/
606daba5cc4SArtem B. Bityutskiy 	if (pad ) {
6071da177e4SLinus Torvalds 		c->wbuf_len = PAD(c->wbuf_len);
6081da177e4SLinus Torvalds 
6091da177e4SLinus Torvalds 		/* Pad with JFFS2_DIRTY_BITMASK initially.  this helps out ECC'd NOR
6101da177e4SLinus Torvalds 		   with 8 byte page size */
6111da177e4SLinus Torvalds 		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
6121da177e4SLinus Torvalds 
6131da177e4SLinus Torvalds 		if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
6141da177e4SLinus Torvalds 			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
6151da177e4SLinus Torvalds 			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
6161da177e4SLinus Torvalds 			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
6171da177e4SLinus Torvalds 			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
6181da177e4SLinus Torvalds 			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
6191da177e4SLinus Torvalds 		}
6201da177e4SLinus Torvalds 	}
6211da177e4SLinus Torvalds 	/* else jffs2_flash_writev has actually filled in the rest of the
6221da177e4SLinus Torvalds 	   buffer for us, and will deal with the node refs etc. later. */
6231da177e4SLinus Torvalds 
6241da177e4SLinus Torvalds #ifdef BREAKME
6251da177e4SLinus Torvalds 	static int breakme;
6261da177e4SLinus Torvalds 	if (breakme++ == 20) {
627da320f05SJoe Perches 		pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs);
6281da177e4SLinus Torvalds 		breakme = 0;
629eda95cbfSArtem Bityutskiy 		mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
6309223a456SThomas Gleixner 			  brokenbuf);
6311da177e4SLinus Torvalds 		ret = -EIO;
6321da177e4SLinus Torvalds 	} else
6331da177e4SLinus Torvalds #endif
6341da177e4SLinus Torvalds 
635eda95cbfSArtem Bityutskiy 		ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
636eda95cbfSArtem Bityutskiy 				&retlen, c->wbuf);
6371da177e4SLinus Torvalds 
638a6bc432eSDavid Woodhouse 	if (ret) {
639da320f05SJoe Perches 		pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret);
640a6bc432eSDavid Woodhouse 		goto wfail;
641a6bc432eSDavid Woodhouse 	} else if (retlen != c->wbuf_pagesize) {
642da320f05SJoe Perches 		pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
6431da177e4SLinus Torvalds 			retlen, c->wbuf_pagesize);
6441da177e4SLinus Torvalds 		ret = -EIO;
645a6bc432eSDavid Woodhouse 		goto wfail;
646a6bc432eSDavid Woodhouse 	} else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
647a6bc432eSDavid Woodhouse 	wfail:
6481da177e4SLinus Torvalds 		jffs2_wbuf_recover(c);
6491da177e4SLinus Torvalds 
6501da177e4SLinus Torvalds 		return ret;
6511da177e4SLinus Torvalds 	}
6521da177e4SLinus Torvalds 
6531da177e4SLinus Torvalds 	/* Adjust free size of the block if we padded. */
654daba5cc4SArtem B. Bityutskiy 	if (pad) {
6550bcc099dSDavid Woodhouse 		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
6561da177e4SLinus Torvalds 
6579c261b33SJoe Perches 		jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
6589c261b33SJoe Perches 			  (wbuf_jeb == c->nextblock) ? "next" : "",
6599c261b33SJoe Perches 			  wbuf_jeb->offset);
6601da177e4SLinus Torvalds 
6611da177e4SLinus Torvalds 		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
6621da177e4SLinus Torvalds 		   padded. If there is less free space in the block than that,
6631da177e4SLinus Torvalds 		   something screwed up */
6649bfeb691SDavid Woodhouse 		if (wbuf_jeb->free_size < waste) {
665da320f05SJoe Perches 			pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
6660bcc099dSDavid Woodhouse 				c->wbuf_ofs, c->wbuf_len, waste);
667da320f05SJoe Perches 			pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
6689bfeb691SDavid Woodhouse 				wbuf_jeb->offset, wbuf_jeb->free_size);
6691da177e4SLinus Torvalds 			BUG();
6701da177e4SLinus Torvalds 		}
6710bcc099dSDavid Woodhouse 
6720bcc099dSDavid Woodhouse 		spin_lock(&c->erase_completion_lock);
6730bcc099dSDavid Woodhouse 
6749bfeb691SDavid Woodhouse 		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
6750bcc099dSDavid Woodhouse 		/* FIXME: that made it count as dirty. Convert to wasted */
6769bfeb691SDavid Woodhouse 		wbuf_jeb->dirty_size -= waste;
6770bcc099dSDavid Woodhouse 		c->dirty_size -= waste;
6789bfeb691SDavid Woodhouse 		wbuf_jeb->wasted_size += waste;
6790bcc099dSDavid Woodhouse 		c->wasted_size += waste;
6800bcc099dSDavid Woodhouse 	} else
6810bcc099dSDavid Woodhouse 		spin_lock(&c->erase_completion_lock);
6821da177e4SLinus Torvalds 
6831da177e4SLinus Torvalds 	/* Stick any now-obsoleted blocks on the erase_pending_list */
6841da177e4SLinus Torvalds 	jffs2_refile_wbuf_blocks(c);
6851da177e4SLinus Torvalds 	jffs2_clear_wbuf_ino_list(c);
6861da177e4SLinus Torvalds 	spin_unlock(&c->erase_completion_lock);
6871da177e4SLinus Torvalds 
6881da177e4SLinus Torvalds 	memset(c->wbuf,0xff,c->wbuf_pagesize);
6891da177e4SLinus Torvalds 	/* adjust write buffer offset, else we get a non contiguous write bug */
6901da177e4SLinus Torvalds 	c->wbuf_ofs += c->wbuf_pagesize;
6911da177e4SLinus Torvalds 	c->wbuf_len = 0;
6921da177e4SLinus Torvalds 	return 0;
6931da177e4SLinus Torvalds }
6941da177e4SLinus Torvalds 
6951da177e4SLinus Torvalds /* Trigger garbage collection to flush the write-buffer.
6961da177e4SLinus Torvalds    If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
6971da177e4SLinus Torvalds    outstanding. If ino arg non-zero, do it only if a write for the
6981da177e4SLinus Torvalds    given inode is outstanding. */
6991da177e4SLinus Torvalds int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
7001da177e4SLinus Torvalds {
7011da177e4SLinus Torvalds 	uint32_t old_wbuf_ofs;
7021da177e4SLinus Torvalds 	uint32_t old_wbuf_len;
7031da177e4SLinus Torvalds 	int ret = 0;
7041da177e4SLinus Torvalds 
7059c261b33SJoe Perches 	jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);
7061da177e4SLinus Torvalds 
7078aee6ac1SDavid Woodhouse 	if (!c->wbuf)
7088aee6ac1SDavid Woodhouse 		return 0;
7098aee6ac1SDavid Woodhouse 
710ced22070SDavid Woodhouse 	mutex_lock(&c->alloc_sem);
7111da177e4SLinus Torvalds 	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
7129c261b33SJoe Perches 		jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
713ced22070SDavid Woodhouse 		mutex_unlock(&c->alloc_sem);
7141da177e4SLinus Torvalds 		return 0;
7151da177e4SLinus Torvalds 	}
7161da177e4SLinus Torvalds 
7171da177e4SLinus Torvalds 	old_wbuf_ofs = c->wbuf_ofs;
7181da177e4SLinus Torvalds 	old_wbuf_len = c->wbuf_len;
7191da177e4SLinus Torvalds 
7201da177e4SLinus Torvalds 	if (c->unchecked_size) {
7211da177e4SLinus Torvalds 		/* GC won't make any progress for a while */
7229c261b33SJoe Perches 		jffs2_dbg(1, "%s(): padding. Not finished checking\n",
7239c261b33SJoe Perches 			  __func__);
7241da177e4SLinus Torvalds 		down_write(&c->wbuf_sem);
7251da177e4SLinus Torvalds 		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
7267f716cf3SEstelle Hammache 		/* retry flushing wbuf in case jffs2_wbuf_recover
7277f716cf3SEstelle Hammache 		   left some data in the wbuf */
7287f716cf3SEstelle Hammache 		if (ret)
7297f716cf3SEstelle Hammache 			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
7301da177e4SLinus Torvalds 		up_write(&c->wbuf_sem);
7311da177e4SLinus Torvalds 	} else while (old_wbuf_len &&
7321da177e4SLinus Torvalds 		      old_wbuf_ofs == c->wbuf_ofs) {
7331da177e4SLinus Torvalds 
734ced22070SDavid Woodhouse 		mutex_unlock(&c->alloc_sem);
7351da177e4SLinus Torvalds 
7369c261b33SJoe Perches 		jffs2_dbg(1, "%s(): calls gc pass\n", __func__);
7371da177e4SLinus Torvalds 
7381da177e4SLinus Torvalds 		ret = jffs2_garbage_collect_pass(c);
7391da177e4SLinus Torvalds 		if (ret) {
7401da177e4SLinus Torvalds 			/* GC failed. Flush it with padding instead */
741ced22070SDavid Woodhouse 			mutex_lock(&c->alloc_sem);
7421da177e4SLinus Torvalds 			down_write(&c->wbuf_sem);
7431da177e4SLinus Torvalds 			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
7447f716cf3SEstelle Hammache 			/* retry flushing wbuf in case jffs2_wbuf_recover
7457f716cf3SEstelle Hammache 			   left some data in the wbuf */
7467f716cf3SEstelle Hammache 			if (ret)
7477f716cf3SEstelle Hammache 				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
7481da177e4SLinus Torvalds 			up_write(&c->wbuf_sem);
7491da177e4SLinus Torvalds 			break;
7501da177e4SLinus Torvalds 		}
751ced22070SDavid Woodhouse 		mutex_lock(&c->alloc_sem);
7521da177e4SLinus Torvalds 	}
7531da177e4SLinus Torvalds 
7549c261b33SJoe Perches 	jffs2_dbg(1, "%s(): ends...\n", __func__);
7551da177e4SLinus Torvalds 
756ced22070SDavid Woodhouse 	mutex_unlock(&c->alloc_sem);
7571da177e4SLinus Torvalds 	return ret;
7581da177e4SLinus Torvalds }
7591da177e4SLinus Torvalds 
7601da177e4SLinus Torvalds /* Pad write-buffer to end and write it, wasting space. */
7611da177e4SLinus Torvalds int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
7621da177e4SLinus Torvalds {
7631da177e4SLinus Torvalds 	int ret;
7641da177e4SLinus Torvalds 
7658aee6ac1SDavid Woodhouse 	if (!c->wbuf)
7668aee6ac1SDavid Woodhouse 		return 0;
7678aee6ac1SDavid Woodhouse 
7681da177e4SLinus Torvalds 	down_write(&c->wbuf_sem);
7691da177e4SLinus Torvalds 	ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
7707f716cf3SEstelle Hammache 	/* retry - maybe wbuf recover left some data in wbuf. */
7717f716cf3SEstelle Hammache 	if (ret)
7727f716cf3SEstelle Hammache 		ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
7731da177e4SLinus Torvalds 	up_write(&c->wbuf_sem);
7741da177e4SLinus Torvalds 
7751da177e4SLinus Torvalds 	return ret;
7761da177e4SLinus Torvalds }
7771da177e4SLinus Torvalds 
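/*
 * Copy as much of 'buf' as will fit into the write buffer and return how
 * many bytes were taken. As a special case, if the buffer is currently
 * empty and the request covers at least a whole page, nothing is copied
 * (returns 0) so the caller can write those pages to flash directly.
 */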
778dcb09328SThomas Gleixner static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
779dcb09328SThomas Gleixner 			      size_t len)
780dcb09328SThomas Gleixner {
781dcb09328SThomas Gleixner 	if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
782dcb09328SThomas Gleixner 		return 0;
783dcb09328SThomas Gleixner 
784dcb09328SThomas Gleixner 	if (len > (c->wbuf_pagesize - c->wbuf_len))
785dcb09328SThomas Gleixner 		len = c->wbuf_pagesize - c->wbuf_len;
786dcb09328SThomas Gleixner 	memcpy(c->wbuf + c->wbuf_len, buf, len);
787dcb09328SThomas Gleixner 	c->wbuf_len += (uint32_t) len;
788dcb09328SThomas Gleixner 	return len;
789dcb09328SThomas Gleixner }
790dcb09328SThomas Gleixner 
791dcb09328SThomas Gleixner int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
792dcb09328SThomas Gleixner 		       unsigned long count, loff_t to, size_t *retlen,
793dcb09328SThomas Gleixner 		       uint32_t ino)
794dcb09328SThomas Gleixner {
795dcb09328SThomas Gleixner 	struct jffs2_eraseblock *jeb;
796dcb09328SThomas Gleixner 	size_t wbuf_retlen, donelen = 0;
797dcb09328SThomas Gleixner 	uint32_t outvec_to = to;
798dcb09328SThomas Gleixner 	int ret, invec;
799dcb09328SThomas Gleixner 
800dcb09328SThomas Gleixner 	/* If not writebuffered flash, don't bother */
8013be36675SAndrew Victor 	if (!jffs2_is_writebuffered(c))
8021da177e4SLinus Torvalds 		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
8031da177e4SLinus Torvalds 
8041da177e4SLinus Torvalds 	down_write(&c->wbuf_sem);
8051da177e4SLinus Torvalds 
8061da177e4SLinus Torvalds 	/* If wbuf_ofs is not initialized, set it to target address */
8071da177e4SLinus Torvalds 	if (c->wbuf_ofs == 0xFFFFFFFF) {
8081da177e4SLinus Torvalds 		c->wbuf_ofs = PAGE_DIV(to);
8091da177e4SLinus Torvalds 		c->wbuf_len = PAGE_MOD(to);
8101da177e4SLinus Torvalds 		memset(c->wbuf,0xff,c->wbuf_pagesize);
8111da177e4SLinus Torvalds 	}
8121da177e4SLinus Torvalds 
813dcb09328SThomas Gleixner 	/*
814dcb09328SThomas Gleixner 	 * Sanity checks on target address.  It's permitted to write
815dcb09328SThomas Gleixner 	 * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
816dcb09328SThomas Gleixner 	 * write at the beginning of a new erase block. Anything else,
817dcb09328SThomas Gleixner 	 * and you die.  New block starts at xxx000c (0-b = block
818dcb09328SThomas Gleixner 	 * header)
8191da177e4SLinus Torvalds 	 */
8203be36675SAndrew Victor 	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
8211da177e4SLinus Torvalds 		/* It's a write to a new block */
8221da177e4SLinus Torvalds 		if (c->wbuf_len) {
8239c261b33SJoe Perches 			jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
8249c261b33SJoe Perches 				  __func__, (unsigned long)to, c->wbuf_ofs);
8251da177e4SLinus Torvalds 			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
826dcb09328SThomas Gleixner 			if (ret)
827dcb09328SThomas Gleixner 				goto outerr;
8281da177e4SLinus Torvalds 		}
8291da177e4SLinus Torvalds 		/* set pointer to new block */
8301da177e4SLinus Torvalds 		c->wbuf_ofs = PAGE_DIV(to);
8311da177e4SLinus Torvalds 		c->wbuf_len = PAGE_MOD(to);
8321da177e4SLinus Torvalds 	}
8331da177e4SLinus Torvalds 
8341da177e4SLinus Torvalds 	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
8351da177e4SLinus Torvalds 		/* We're not writing immediately after the writebuffer. Bad. */
836da320f05SJoe Perches 		pr_crit("%s(): Non-contiguous write to %08lx\n",
8379c261b33SJoe Perches 			__func__, (unsigned long)to);
8381da177e4SLinus Torvalds 		if (c->wbuf_len)
839da320f05SJoe Perches 			pr_crit("wbuf was previously %08x-%08x\n",
8401da177e4SLinus Torvalds 				c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len);
8411da177e4SLinus Torvalds 		BUG();
8421da177e4SLinus Torvalds 	}
8431da177e4SLinus Torvalds 
8441da177e4SLinus Torvalds 	/* adjust alignment offset */
8451da177e4SLinus Torvalds 	if (c->wbuf_len != PAGE_MOD(to)) {
8461da177e4SLinus Torvalds 		c->wbuf_len = PAGE_MOD(to);
8471da177e4SLinus Torvalds 		/* take care of alignment to next page */
848dcb09328SThomas Gleixner 		if (!c->wbuf_len) {
8491da177e4SLinus Torvalds 			c->wbuf_len = c->wbuf_pagesize;
8501da177e4SLinus Torvalds 			ret = __jffs2_flush_wbuf(c, NOPAD);
851dcb09328SThomas Gleixner 			if (ret)
852dcb09328SThomas Gleixner 				goto outerr;
8531da177e4SLinus Torvalds 		}
8541da177e4SLinus Torvalds 	}
8551da177e4SLinus Torvalds 
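	/*
	 * For each vector: first top up the write buffer (flushing it when
	 * it fills a whole page), then write any remaining whole pages
	 * straight to flash, and finally stash whatever is left over back
	 * in the write buffer.
	 */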
856dcb09328SThomas Gleixner 	for (invec = 0; invec < count; invec++) {
857dcb09328SThomas Gleixner 		int vlen = invecs[invec].iov_len;
858dcb09328SThomas Gleixner 		uint8_t *v = invecs[invec].iov_base;
8591da177e4SLinus Torvalds 
860dcb09328SThomas Gleixner 		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
8611da177e4SLinus Torvalds 
862dcb09328SThomas Gleixner 		if (c->wbuf_len == c->wbuf_pagesize) {
863dcb09328SThomas Gleixner 			ret = __jffs2_flush_wbuf(c, NOPAD);
864dcb09328SThomas Gleixner 			if (ret)
865dcb09328SThomas Gleixner 				goto outerr;
8661da177e4SLinus Torvalds 		}
867dcb09328SThomas Gleixner 		vlen -= wbuf_retlen;
868dcb09328SThomas Gleixner 		outvec_to += wbuf_retlen;
8691da177e4SLinus Torvalds 		donelen += wbuf_retlen;
870dcb09328SThomas Gleixner 		v += wbuf_retlen;
8711da177e4SLinus Torvalds 
872dcb09328SThomas Gleixner 		if (vlen >= c->wbuf_pagesize) {
873eda95cbfSArtem Bityutskiy 			ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen),
874dcb09328SThomas Gleixner 					&wbuf_retlen, v);
875dcb09328SThomas Gleixner 			if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
876dcb09328SThomas Gleixner 				goto outfile;
877dcb09328SThomas Gleixner 
878dcb09328SThomas Gleixner 			vlen -= wbuf_retlen;
879dcb09328SThomas Gleixner 			outvec_to += wbuf_retlen;
880dcb09328SThomas Gleixner 			c->wbuf_ofs = outvec_to;
881dcb09328SThomas Gleixner 			donelen += wbuf_retlen;
882dcb09328SThomas Gleixner 			v += wbuf_retlen;
8831da177e4SLinus Torvalds 		}
8841da177e4SLinus Torvalds 
885dcb09328SThomas Gleixner 		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
886dcb09328SThomas Gleixner 		if (c->wbuf_len == c->wbuf_pagesize) {
887dcb09328SThomas Gleixner 			ret = __jffs2_flush_wbuf(c, NOPAD);
888dcb09328SThomas Gleixner 			if (ret)
889dcb09328SThomas Gleixner 				goto outerr;
8901da177e4SLinus Torvalds 		}
8911da177e4SLinus Torvalds 
892dcb09328SThomas Gleixner 		outvec_to += wbuf_retlen;
893dcb09328SThomas Gleixner 		donelen += wbuf_retlen;
8941da177e4SLinus Torvalds 	}
8951da177e4SLinus Torvalds 
896dcb09328SThomas Gleixner 	/*
897dcb09328SThomas Gleixner 	 * If there's a remainder in the wbuf and it's a non-GC write,
898dcb09328SThomas Gleixner 	 * remember that the wbuf affects this ino
899dcb09328SThomas Gleixner 	 */
9001da177e4SLinus Torvalds 	*retlen = donelen;
9011da177e4SLinus Torvalds 
902e631ddbaSFerenc Havasi 	if (jffs2_sum_active()) {
903e631ddbaSFerenc Havasi 		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
904e631ddbaSFerenc Havasi 		if (res)
905e631ddbaSFerenc Havasi 			return res;
906e631ddbaSFerenc Havasi 	}
907e631ddbaSFerenc Havasi 
9081da177e4SLinus Torvalds 	if (c->wbuf_len && ino)
9091da177e4SLinus Torvalds 		jffs2_wbuf_dirties_inode(c, ino);
9101da177e4SLinus Torvalds 
9111da177e4SLinus Torvalds 	ret = 0;
912dcb09328SThomas Gleixner 	up_write(&c->wbuf_sem);
913dcb09328SThomas Gleixner 	return ret;
9141da177e4SLinus Torvalds 
915dcb09328SThomas Gleixner outfile:
916dcb09328SThomas Gleixner 	/*
917dcb09328SThomas Gleixner 	 * At this point we have no problem, c->wbuf is empty. However
918dcb09328SThomas Gleixner 	 * refile nextblock to avoid writing again to same address.
919dcb09328SThomas Gleixner 	 */
920dcb09328SThomas Gleixner 
921dcb09328SThomas Gleixner 	spin_lock(&c->erase_completion_lock);
922dcb09328SThomas Gleixner 
923dcb09328SThomas Gleixner 	jeb = &c->blocks[outvec_to / c->sector_size];
924dcb09328SThomas Gleixner 	jffs2_block_refile(c, jeb, REFILE_ANYWAY);
925dcb09328SThomas Gleixner 
926dcb09328SThomas Gleixner 	spin_unlock(&c->erase_completion_lock);
927dcb09328SThomas Gleixner 
928dcb09328SThomas Gleixner outerr:
929dcb09328SThomas Gleixner 	*retlen = 0;
9301da177e4SLinus Torvalds 	up_write(&c->wbuf_sem);
9311da177e4SLinus Torvalds 	return ret;
9321da177e4SLinus Torvalds }
9331da177e4SLinus Torvalds 
9341da177e4SLinus Torvalds /*
9351da177e4SLinus Torvalds  *	This is the entry point for flash writes.
9361da177e4SLinus Torvalds  *	Check if we are working on NAND flash; if so, build a kvec and write it via writev.
9371da177e4SLinus Torvalds */
9389bfeb691SDavid Woodhouse int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
9399bfeb691SDavid Woodhouse 		      size_t *retlen, const u_char *buf)
9401da177e4SLinus Torvalds {
9411da177e4SLinus Torvalds 	struct kvec vecs[1];
9421da177e4SLinus Torvalds 
9433be36675SAndrew Victor 	if (!jffs2_is_writebuffered(c))
944e631ddbaSFerenc Havasi 		return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
9451da177e4SLinus Torvalds 
9461da177e4SLinus Torvalds 	vecs[0].iov_base = (unsigned char *) buf;
9471da177e4SLinus Torvalds 	vecs[0].iov_len = len;
9481da177e4SLinus Torvalds 	return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
9491da177e4SLinus Torvalds }
9501da177e4SLinus Torvalds 
9511da177e4SLinus Torvalds /*
9521da177e4SLinus Torvalds 	Handle readback from writebuffer and ECC failure return
9531da177e4SLinus Torvalds */
9541da177e4SLinus Torvalds int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
9551da177e4SLinus Torvalds {
9561da177e4SLinus Torvalds 	loff_t	orbf = 0, owbf = 0, lwbf = 0;
9571da177e4SLinus Torvalds 	int	ret;
9581da177e4SLinus Torvalds 
9593be36675SAndrew Victor 	if (!jffs2_is_writebuffered(c))
960329ad399SArtem Bityutskiy 		return mtd_read(c->mtd, ofs, len, retlen, buf);
9611da177e4SLinus Torvalds 
9623be36675SAndrew Victor 	/* Read flash */
963894214d1SArtem B. Bityuckiy 	down_read(&c->wbuf_sem);
964329ad399SArtem Bityutskiy 	ret = mtd_read(c->mtd, ofs, len, retlen, buf);
9651da177e4SLinus Torvalds 
9669a1fcdfdSThomas Gleixner 	if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
9679a1fcdfdSThomas Gleixner 		if (ret == -EBADMSG)
968da320f05SJoe Perches 			pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
969da320f05SJoe Perches 				len, ofs);
9701da177e4SLinus Torvalds 		/*
9719a1fcdfdSThomas Gleixner 		 * We have the raw data without ECC correction in the buffer,
9729a1fcdfdSThomas Gleixner 		 * maybe we are lucky and all data or parts are correct. We
9739a1fcdfdSThomas Gleixner 		 * check the node.  If the data is corrupted, the node check will
9749a1fcdfdSThomas Gleixner 		 * sort it out.  We keep this block; it will fail on write or erase
9759a1fcdfdSThomas Gleixner 		 * and then we mark it bad. Or should we do that now? But we
9769a1fcdfdSThomas Gleixner 		 * should give it a chance.  Maybe we had a system crash or a
9779a1fcdfdSThomas Gleixner 		 * power loss before the ECC write or an erase was completed.
9781da177e4SLinus Torvalds 		 * So we return success. :)
9791da177e4SLinus Torvalds 		 */
9801da177e4SLinus Torvalds 		ret = 0;
9811da177e4SLinus Torvalds 	}
9821da177e4SLinus Torvalds 
9831da177e4SLinus Torvalds 	/* if no writebuffer available or write buffer empty, return */
9841da177e4SLinus Torvalds 	if (!c->wbuf_pagesize || !c->wbuf_len)
985894214d1SArtem B. Bityuckiy 		goto exit;
9861da177e4SLinus Torvalds 
9871da177e4SLinus Torvalds 	/* if we read in a different block, return */
9883be36675SAndrew Victor 	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
989894214d1SArtem B. Bityuckiy 		goto exit;
9901da177e4SLinus Torvalds 
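	/*
	 * The read falls in the same eraseblock that the write buffer is
	 * covering, so any bytes still held only in c->wbuf must be copied
	 * over the data read from flash: owbf/orbf are the offsets into the
	 * write buffer and the read buffer, and lwbf is how many bytes to
	 * copy.
	 */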
9911da177e4SLinus Torvalds 	if (ofs >= c->wbuf_ofs) {
9921da177e4SLinus Torvalds 		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
9931da177e4SLinus Torvalds 		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
9941da177e4SLinus Torvalds 			goto exit;
9951da177e4SLinus Torvalds 		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
9961da177e4SLinus Torvalds 		if (lwbf > len)
9971da177e4SLinus Torvalds 			lwbf = len;
9981da177e4SLinus Torvalds 	} else {
9991da177e4SLinus Torvalds 		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
10001da177e4SLinus Torvalds 		if (orbf > len)			/* is write beyond write buffer ? */
10011da177e4SLinus Torvalds 			goto exit;
10021da177e4SLinus Torvalds 		lwbf = len - orbf;		/* number of bytes to copy */
10031da177e4SLinus Torvalds 		if (lwbf > c->wbuf_len)
10041da177e4SLinus Torvalds 			lwbf = c->wbuf_len;
10051da177e4SLinus Torvalds 	}
10061da177e4SLinus Torvalds 	if (lwbf > 0)
10071da177e4SLinus Torvalds 		memcpy(buf+orbf,c->wbuf+owbf,lwbf);
10081da177e4SLinus Torvalds 
10091da177e4SLinus Torvalds exit:
10101da177e4SLinus Torvalds 	up_read(&c->wbuf_sem);
10111da177e4SLinus Torvalds 	return ret;
10121da177e4SLinus Torvalds }
10131da177e4SLinus Torvalds 
10148593fbc6SThomas Gleixner #define NR_OOB_SCAN_PAGES 4
10158593fbc6SThomas Gleixner 
101609b3fba5SDavid Woodhouse /* For historical reasons we use only 8 bytes for OOB clean marker */
101709b3fba5SDavid Woodhouse #define OOB_CM_SIZE 8
1018a7a6ace1SArtem Bityutskiy 
1019a7a6ace1SArtem Bityutskiy static const struct jffs2_unknown_node oob_cleanmarker =
1020a7a6ace1SArtem Bityutskiy {
1021566865a2SDavid Woodhouse 	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
1022566865a2SDavid Woodhouse 	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
1023566865a2SDavid Woodhouse 	.totlen = constant_cpu_to_je32(8)
1024a7a6ace1SArtem Bityutskiy };
1025a7a6ace1SArtem Bityutskiy 
10261da177e4SLinus Torvalds /*
1027a7a6ace1SArtem Bityutskiy  * Check whether the out-of-band area is empty. This function knows about the
1028a7a6ace1SArtem Bityutskiy  * cleanmarker; if it is present in OOB, the OOB is still treated as empty.
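 * If 'mode' is non-zero, the bytes reserved for the cleanmarker are skipped
 * during the check (see the 'cmlen' handling below).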
10291da177e4SLinus Torvalds  */
10308593fbc6SThomas Gleixner int jffs2_check_oob_empty(struct jffs2_sb_info *c,
10318593fbc6SThomas Gleixner 			  struct jffs2_eraseblock *jeb, int mode)
10321da177e4SLinus Torvalds {
1033a7a6ace1SArtem Bityutskiy 	int i, ret;
1034a7a6ace1SArtem Bityutskiy 	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
10358593fbc6SThomas Gleixner 	struct mtd_oob_ops ops;
10361da177e4SLinus Torvalds 
10370612b9ddSBrian Norris 	ops.mode = MTD_OPS_AUTO_OOB;
1038a7a6ace1SArtem Bityutskiy 	ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
10398593fbc6SThomas Gleixner 	ops.oobbuf = c->oobbuf;
1040a7a6ace1SArtem Bityutskiy 	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
10418593fbc6SThomas Gleixner 	ops.datbuf = NULL;
10428593fbc6SThomas Gleixner 
1043fd2819bbSArtem Bityutskiy 	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
1044a7a6ace1SArtem Bityutskiy 	if (ret || ops.oobretlen != ops.ooblen) {
1045da320f05SJoe Perches 		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
10467be26bfbSAndrew Morton 		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
1047a7a6ace1SArtem Bityutskiy 		if (!ret)
1048a7a6ace1SArtem Bityutskiy 			ret = -EIO;
10498593fbc6SThomas Gleixner 		return ret;
10501da177e4SLinus Torvalds 	}
10511da177e4SLinus Torvalds 
1052a7a6ace1SArtem Bityutskiy 	for(i = 0; i < ops.ooblen; i++) {
1053a7a6ace1SArtem Bityutskiy 		if (mode && i < cmlen)
1054a7a6ace1SArtem Bityutskiy 			/* Yeah, we know about the cleanmarker */
10551da177e4SLinus Torvalds 			continue;
10561da177e4SLinus Torvalds 
10578593fbc6SThomas Gleixner 		if (ops.oobbuf[i] != 0xFF) {
10589c261b33SJoe Perches 			jffs2_dbg(2, "Found %02x at %x in OOB for "
10599c261b33SJoe Perches 				  "%08x\n", ops.oobbuf[i], i, jeb->offset);
10608593fbc6SThomas Gleixner 			return 1;
10611da177e4SLinus Torvalds 		}
10621da177e4SLinus Torvalds 	}
10631da177e4SLinus Torvalds 
10648593fbc6SThomas Gleixner 	return 0;
10651da177e4SLinus Torvalds }
10661da177e4SLinus Torvalds 
10671da177e4SLinus Torvalds /*
1068a7a6ace1SArtem Bityutskiy  * Check for a valid cleanmarker.
1069a7a6ace1SArtem Bityutskiy  * Returns: 0 if a valid cleanmarker was found
1070a7a6ace1SArtem Bityutskiy  *	    1 if no cleanmarker was found
1071a7a6ace1SArtem Bityutskiy  *	    negative error code if an error occurred
10721da177e4SLinus Torvalds  */
10738593fbc6SThomas Gleixner int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
10748593fbc6SThomas Gleixner 				 struct jffs2_eraseblock *jeb)
10751da177e4SLinus Torvalds {
10768593fbc6SThomas Gleixner 	struct mtd_oob_ops ops;
1077a7a6ace1SArtem Bityutskiy 	int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
10781da177e4SLinus Torvalds 
10790612b9ddSBrian Norris 	ops.mode = MTD_OPS_AUTO_OOB;
1080a7a6ace1SArtem Bityutskiy 	ops.ooblen = cmlen;
10818593fbc6SThomas Gleixner 	ops.oobbuf = c->oobbuf;
1082a7a6ace1SArtem Bityutskiy 	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
10838593fbc6SThomas Gleixner 	ops.datbuf = NULL;
10848593fbc6SThomas Gleixner 
1085fd2819bbSArtem Bityutskiy 	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
1086a7a6ace1SArtem Bityutskiy 	if (ret || ops.oobretlen != ops.ooblen) {
1087da320f05SJoe Perches 		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
10887be26bfbSAndrew Morton 		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
1089a7a6ace1SArtem Bityutskiy 		if (!ret)
1090a7a6ace1SArtem Bityutskiy 			ret = -EIO;
10911da177e4SLinus Torvalds 		return ret;
10921da177e4SLinus Torvalds 	}
10938593fbc6SThomas Gleixner 
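	/* memcmp() returns 0 on an exact match; '!!' folds any non-zero result
	 * to 1, so a valid cleanmarker yields 0 and anything else yields 1,
	 * matching the return convention documented above. */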
1094a7a6ace1SArtem Bityutskiy 	return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
10951da177e4SLinus Torvalds }
10961da177e4SLinus Torvalds 
10978593fbc6SThomas Gleixner int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
10988593fbc6SThomas Gleixner 				 struct jffs2_eraseblock *jeb)
10991da177e4SLinus Torvalds {
11001da177e4SLinus Torvalds 	int ret;
11018593fbc6SThomas Gleixner 	struct mtd_oob_ops ops;
1102a7a6ace1SArtem Bityutskiy 	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
11031da177e4SLinus Torvalds 
11040612b9ddSBrian Norris 	ops.mode = MTD_OPS_AUTO_OOB;
1105a7a6ace1SArtem Bityutskiy 	ops.ooblen = cmlen;
1106a7a6ace1SArtem Bityutskiy 	ops.oobbuf = (uint8_t *)&oob_cleanmarker;
1107a7a6ace1SArtem Bityutskiy 	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
11088593fbc6SThomas Gleixner 	ops.datbuf = NULL;
11098593fbc6SThomas Gleixner 
1110a2cc5ba0SArtem Bityutskiy 	ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
1111a7a6ace1SArtem Bityutskiy 	if (ret || ops.oobretlen != ops.ooblen) {
1112da320f05SJoe Perches 		pr_err("cannot write OOB for EB at %08x, requested %zd bytes, wrote %zd bytes, error %d\n",
11137be26bfbSAndrew Morton 		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
1114a7a6ace1SArtem Bityutskiy 		if (!ret)
1115a7a6ace1SArtem Bityutskiy 			ret = -EIO;
11161da177e4SLinus Torvalds 		return ret;
11171da177e4SLinus Torvalds 	}
1118a7a6ace1SArtem Bityutskiy 
11191da177e4SLinus Torvalds 	return 0;
11201da177e4SLinus Torvalds }
11211da177e4SLinus Torvalds 
11221da177e4SLinus Torvalds /*
11231da177e4SLinus Torvalds  * On NAND we try to mark this block bad. If erasing it has failed more
112425985edcSLucas De Marchi  * than MAX_ERASE_FAILURES times, we finally mark it bad.
11251da177e4SLinus Torvalds  * We don't care about failures here. The block remains on the erase-pending
11261da177e4SLinus Torvalds  * or badblock list as long as nobody manipulates the flash with
11271da177e4SLinus Torvalds  * a bootloader or something like that.
11281da177e4SLinus Torvalds  */
11291da177e4SLinus Torvalds 
11301da177e4SLinus Torvalds int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
11311da177e4SLinus Torvalds {
11321da177e4SLinus Torvalds 	int 	ret;
11331da177e4SLinus Torvalds 
11341da177e4SLinus Torvalds 	/* If the erase failure count is still below the limit, don't mark the block bad yet */
11351da177e4SLinus Torvalds 	if (++jeb->bad_count < MAX_ERASE_FAILURES)
11361da177e4SLinus Torvalds 		return 0;
11371da177e4SLinus Torvalds 
1138da320f05SJoe Perches 	pr_warn("JFFS2: marking eraseblock at %08x as bad\n", bad_offset);
11395942ddbcSArtem Bityutskiy 	ret = mtd_block_markbad(c->mtd, bad_offset);
11401da177e4SLinus Torvalds 
11411da177e4SLinus Torvalds 	if (ret) {
11429c261b33SJoe Perches 		jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
11439c261b33SJoe Perches 			  __func__, jeb->offset, ret);
11441da177e4SLinus Torvalds 		return ret;
11451da177e4SLinus Torvalds 	}
11461da177e4SLinus Torvalds 	return 1;
11471da177e4SLinus Torvalds }
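/*
 * Hypothetical caller sketch (not the actual erase.c logic) illustrating the
 * three-way return contract of jffs2_write_nand_badblock() above: 0 means the
 * failure count is still below MAX_ERASE_FAILURES, 1 means the block has now
 * been marked bad, and a negative value is an error from mtd_block_markbad().
 * Guarded with #if 0 so it is never compiled as part of this file.
 */
#if 0
static void example_handle_erase_failure(struct jffs2_sb_info *c,
					 struct jffs2_eraseblock *jeb,
					 uint32_t bad_offset)
{
	int ret = jffs2_write_nand_badblock(c, jeb, bad_offset);

	if (ret > 0)
		pr_info("block at %08x is now marked bad\n", jeb->offset);
	else if (ret < 0)
		pr_warn("marking block at %08x bad failed: %d\n", jeb->offset, ret);
	/* ret == 0: below the retry threshold, keep the block on the
	 * erase-pending list and try erasing it again later. */
}
#endif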
11481da177e4SLinus Torvalds 
1149a7a6ace1SArtem Bityutskiy int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
11501da177e4SLinus Torvalds {
11515bd34c09SThomas Gleixner 	struct nand_ecclayout *oinfo = c->mtd->ecclayout;
11521da177e4SLinus Torvalds 
11531da177e4SLinus Torvalds 	if (!c->mtd->oobsize)
11541da177e4SLinus Torvalds 		return 0;
11551da177e4SLinus Torvalds 
11561da177e4SLinus Torvalds 	/* Cleanmarker is out-of-band, so inline size zero */
11571da177e4SLinus Torvalds 	c->cleanmarker_size = 0;
11581da177e4SLinus Torvalds 
1159a7a6ace1SArtem Bityutskiy 	if (!oinfo || oinfo->oobavail == 0) {
1160da320f05SJoe Perches 		pr_err("inconsistent device description\n");
11611da177e4SLinus Torvalds 		return -EINVAL;
11621da177e4SLinus Torvalds 	}
11635bd34c09SThomas Gleixner 
11649c261b33SJoe Perches 	jffs2_dbg(1, "JFFS2 using OOB on NAND\n");
11655bd34c09SThomas Gleixner 
1166a7a6ace1SArtem Bityutskiy 	c->oobavail = oinfo->oobavail;
11671da177e4SLinus Torvalds 
11681da177e4SLinus Torvalds 	/* Initialise write buffer */
11691da177e4SLinus Torvalds 	init_rwsem(&c->wbuf_sem);
117028318776SJoern Engel 	c->wbuf_pagesize = c->mtd->writesize;
11711da177e4SLinus Torvalds 	c->wbuf_ofs = 0xFFFFFFFF;
11721da177e4SLinus Torvalds 
11731da177e4SLinus Torvalds 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
11741da177e4SLinus Torvalds 	if (!c->wbuf)
11751da177e4SLinus Torvalds 		return -ENOMEM;
11761da177e4SLinus Torvalds 
1177a7a6ace1SArtem Bityutskiy 	c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->oobavail, GFP_KERNEL);
1178a7a6ace1SArtem Bityutskiy 	if (!c->oobbuf) {
11791da177e4SLinus Torvalds 		kfree(c->wbuf);
11801da177e4SLinus Torvalds 		return -ENOMEM;
11811da177e4SLinus Torvalds 	}
1182a7a6ace1SArtem Bityutskiy 
1183a6bc432eSDavid Woodhouse #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1184a6bc432eSDavid Woodhouse 	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1185a6bc432eSDavid Woodhouse 	if (!c->wbuf_verify) {
1186a6bc432eSDavid Woodhouse 		kfree(c->oobbuf);
1187a6bc432eSDavid Woodhouse 		kfree(c->wbuf);
1188a6bc432eSDavid Woodhouse 		return -ENOMEM;
1189a6bc432eSDavid Woodhouse 	}
1190a6bc432eSDavid Woodhouse #endif
1191a7a6ace1SArtem Bityutskiy 	return 0;
11921da177e4SLinus Torvalds }
11931da177e4SLinus Torvalds 
11941da177e4SLinus Torvalds void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
11951da177e4SLinus Torvalds {
1196a6bc432eSDavid Woodhouse #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1197a6bc432eSDavid Woodhouse 	kfree(c->wbuf_verify);
1198a6bc432eSDavid Woodhouse #endif
11991da177e4SLinus Torvalds 	kfree(c->wbuf);
12008593fbc6SThomas Gleixner 	kfree(c->oobbuf);
12011da177e4SLinus Torvalds }
12021da177e4SLinus Torvalds 
12038f15fd55SAndrew Victor int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
12048f15fd55SAndrew Victor 	c->cleanmarker_size = 0;		/* No cleanmarkers needed */
12058f15fd55SAndrew Victor 
12068f15fd55SAndrew Victor 	/* Initialize write buffer */
12078f15fd55SAndrew Victor 	init_rwsem(&c->wbuf_sem);
12088f15fd55SAndrew Victor 
1209daba5cc4SArtem B. Bityutskiy 
1210daba5cc4SArtem B. Bityutskiy 	c->wbuf_pagesize = c->mtd->erasesize;
1211daba5cc4SArtem B. Bityutskiy 
1212daba5cc4SArtem B. Bityutskiy 	/* Find a suitable c->sector_size
1213daba5cc4SArtem B. Bityutskiy 	 * - Not too many sectors
1214daba5cc4SArtem B. Bityutskiy 	 * - Sectors have to be at least 4 KiB plus some bytes
1215daba5cc4SArtem B. Bityutskiy 	 * - All known DataFlashes have erase sizes of 528 or 1056 bytes
1216daba5cc4SArtem B. Bityutskiy 	 * - We take at least 8 eraseblocks and want a sector size of at least 8 KiB
1217daba5cc4SArtem B. Bityutskiy 	 * - The number of concatenated eraseblocks should be a power of 2
1218daba5cc4SArtem B. Bityutskiy 	 */
1219daba5cc4SArtem B. Bityutskiy 
1220daba5cc4SArtem B. Bityutskiy 	c->sector_size = 8 * c->mtd->erasesize;
1221daba5cc4SArtem B. Bityutskiy 
1222daba5cc4SArtem B. Bityutskiy 	while (c->sector_size < 8192) {
1223daba5cc4SArtem B. Bityutskiy 		c->sector_size *= 2;
1224daba5cc4SArtem B. Bityutskiy 	}
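	/*
	 * Worked example (illustrative, not part of the original source): with
	 * a 528-byte erasesize, 8 * 528 = 4224 < 8192, so the loop doubles the
	 * size once to 16 eraseblocks = 8448 bytes; with a 1056-byte erasesize,
	 * 8 * 1056 = 8448 is already >= 8192 and is used unchanged.
	 */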
1225daba5cc4SArtem B. Bityutskiy 
1226daba5cc4SArtem B. Bityutskiy 	/* It may be necessary to adjust the flash size */
1227daba5cc4SArtem B. Bityutskiy 	c->flash_size = c->mtd->size;
1228daba5cc4SArtem B. Bityutskiy 
1229daba5cc4SArtem B. Bityutskiy 	if ((c->flash_size % c->sector_size) != 0) {
1230daba5cc4SArtem B. Bityutskiy 		c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
1231da320f05SJoe Perches 		pr_warn("JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
1232daba5cc4SArtem B. Bityutskiy 	}
1233daba5cc4SArtem B. Bityutskiy 
1234daba5cc4SArtem B. Bityutskiy 	c->wbuf_ofs = 0xFFFFFFFF;
12358f15fd55SAndrew Victor 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
12368f15fd55SAndrew Victor 	if (!c->wbuf)
12378f15fd55SAndrew Victor 		return -ENOMEM;
12388f15fd55SAndrew Victor 
1239cca15841Smichael #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1240cca15841Smichael 	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1241cca15841Smichael 	if (!c->wbuf_verify) {
1242cca15841Smichael 		kfree(c->oobbuf);
1243cca15841Smichael 		kfree(c->wbuf);
1244cca15841Smichael 		return -ENOMEM;
1245cca15841Smichael 	}
1246cca15841Smichael #endif
1247cca15841Smichael 
1248da320f05SJoe Perches 	pr_info("JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n",
1249da320f05SJoe Perches 		c->wbuf_pagesize, c->sector_size);
12508f15fd55SAndrew Victor 
12518f15fd55SAndrew Victor 	return 0;
12528f15fd55SAndrew Victor }
12538f15fd55SAndrew Victor 
12548f15fd55SAndrew Victor void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1255cca15841Smichael #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1256cca15841Smichael 	kfree(c->wbuf_verify);
1257cca15841Smichael #endif
12588f15fd55SAndrew Victor 	kfree(c->wbuf);
12598f15fd55SAndrew Victor }
12608f15fd55SAndrew Victor 
126159da721aSNicolas Pitre int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1262c8b229deSJoern Engel 	/* The cleanmarker currently occupies whole programming regions,
1263c8b229deSJoern Engel 	 * either one or two for 8-byte STMicro flashes. */
1264c8b229deSJoern Engel 	c->cleanmarker_size = max(16u, c->mtd->writesize);
126559da721aSNicolas Pitre 
126659da721aSNicolas Pitre 	/* Initialize write buffer */
126759da721aSNicolas Pitre 	init_rwsem(&c->wbuf_sem);
126828318776SJoern Engel 	c->wbuf_pagesize = c->mtd->writesize;
126959da721aSNicolas Pitre 	c->wbuf_ofs = 0xFFFFFFFF;
127059da721aSNicolas Pitre 
127159da721aSNicolas Pitre 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
127259da721aSNicolas Pitre 	if (!c->wbuf)
127359da721aSNicolas Pitre 		return -ENOMEM;
127459da721aSNicolas Pitre 
1275bc8cec0dSMassimo Cirillo #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1276bc8cec0dSMassimo Cirillo 	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1277bc8cec0dSMassimo Cirillo 	if (!c->wbuf_verify) {
1278bc8cec0dSMassimo Cirillo 		kfree(c->wbuf);
1279bc8cec0dSMassimo Cirillo 		return -ENOMEM;
1280bc8cec0dSMassimo Cirillo 	}
1281bc8cec0dSMassimo Cirillo #endif
128259da721aSNicolas Pitre 	return 0;
128359da721aSNicolas Pitre }
128459da721aSNicolas Pitre 
128559da721aSNicolas Pitre void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1286bc8cec0dSMassimo Cirillo #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1287bc8cec0dSMassimo Cirillo 	kfree(c->wbuf_verify);
1288bc8cec0dSMassimo Cirillo #endif
128959da721aSNicolas Pitre 	kfree(c->wbuf);
129059da721aSNicolas Pitre }
12910029da3bSArtem Bityutskiy 
12920029da3bSArtem Bityutskiy int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
12930029da3bSArtem Bityutskiy 	c->cleanmarker_size = 0;
12940029da3bSArtem Bityutskiy 
12950029da3bSArtem Bityutskiy 	if (c->mtd->writesize == 1)
12960029da3bSArtem Bityutskiy 		/* We do not need a write buffer */
12970029da3bSArtem Bityutskiy 		return 0;
12980029da3bSArtem Bityutskiy 
12990029da3bSArtem Bityutskiy 	init_rwsem(&c->wbuf_sem);
13000029da3bSArtem Bityutskiy 
13010029da3bSArtem Bityutskiy 	c->wbuf_pagesize = c->mtd->writesize;
13020029da3bSArtem Bityutskiy 	c->wbuf_ofs = 0xFFFFFFFF;
13030029da3bSArtem Bityutskiy 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
13040029da3bSArtem Bityutskiy 	if (!c->wbuf)
13050029da3bSArtem Bityutskiy 		return -ENOMEM;
13060029da3bSArtem Bityutskiy 
1307da320f05SJoe Perches 	pr_info("JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n",
1308da320f05SJoe Perches 		c->wbuf_pagesize, c->sector_size);
13090029da3bSArtem Bityutskiy 
13100029da3bSArtem Bityutskiy 	return 0;
13110029da3bSArtem Bityutskiy }
13120029da3bSArtem Bityutskiy 
13130029da3bSArtem Bityutskiy void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) {
13140029da3bSArtem Bityutskiy 	kfree(c->wbuf);
13150029da3bSArtem Bityutskiy }
1316