/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004 Thomas Gleixner <tglx@linutronix.de>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/crc32.h>
#include <linux/mtd/nand.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

#include "nodelist.h"

/* For testing write failures */
#undef BREAKME
#undef BREAKMEHEADER

#ifdef BREAKME
static unsigned char *brokenbuf;
#endif

#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
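
/*
 * Illustrative example (hypothetical numbers): with a 512-byte
 * c->wbuf_pagesize, PAGE_DIV(0x2234) is 0x2200 (the start of the enclosing
 * write-buffer page) and PAGE_MOD(0x2234) is 0x34 (the offset within that
 * page), so PAGE_DIV(x) + PAGE_MOD(x) == x always holds.
 */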

/* max. erase failures before we mark a block bad */
#define MAX_ERASE_FAILURES 	2

struct jffs2_inodirty {
	uint32_t ino;
	struct jffs2_inodirty *next;
};

static struct jffs2_inodirty inodirty_nomem;

static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *this = c->wbuf_inodes;

	/* If a malloc failed, consider _everything_ dirty */
	if (this == &inodirty_nomem)
		return 1;

	/* If ino == 0, _any_ non-GC writes mean 'yes' */
	if (this && !ino)
		return 1;

	/* Look to see if the inode in question is pending in the wbuf */
	while (this) {
		if (this->ino == ino)
			return 1;
		this = this->next;
	}
	return 0;
}

static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
{
	struct jffs2_inodirty *this;

	this = c->wbuf_inodes;

	if (this != &inodirty_nomem) {
		while (this) {
			struct jffs2_inodirty *next = this->next;
			kfree(this);
			this = next;
		}
	}
	c->wbuf_inodes = NULL;
}

static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *new;

	/* Mark the superblock dirty so that kupdated will flush... */
	jffs2_dirty_trigger(c);

	if (jffs2_wbuf_pending_for_ino(c, ino))
		return;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
		jffs2_clear_wbuf_ino_list(c);
		c->wbuf_inodes = &inodirty_nomem;
		return;
	}
	new->ino = ino;
	new->next = c->wbuf_inodes;
	c->wbuf_inodes = new;
	return;
}
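
/*
 * Usage sketch: jffs2_flash_writev() calls jffs2_wbuf_dirties_inode()
 * whenever data for an inode is left sitting in the write buffer, and
 * jffs2_flush_wbuf_gc() later uses jffs2_wbuf_pending_for_ino() to decide
 * whether that inode (or, for ino == 0, any non-GC write) still has
 * unflushed data. A kmalloc failure degrades to "everything is dirty"
 * via inodirty_nomem rather than losing track of pending inodes.
 */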

static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
{
	struct list_head *this, *next;
	static int n;

	if (list_empty(&c->erasable_pending_wbuf_list))
		return;

	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);

		jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
			  jeb->offset);
		list_del(this);
		if ((jiffies + (n++)) & 127) {
			/* Most of the time, we just erase it immediately. Otherwise we
			   spend ages scanning it on mount, etc. */
			jffs2_dbg(1, "...and adding to erase_pending_list\n");
			list_add_tail(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
		} else {
			/* Sometimes, however, we leave it elsewhere so it doesn't get
			   immediately reused, and we spread the load a bit. */
			jffs2_dbg(1, "...and adding to erasable_list\n");
			list_add_tail(&jeb->list, &c->erasable_list);
		}
	}
}
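
/*
 * Note on the heuristic above: "(jiffies + n) & 127" is non-zero for 127 of
 * every 128 values, so roughly one block in 128 is parked on erasable_list
 * instead of being queued for immediate erasure, spreading the erase load
 * and avoiding immediate reuse of the same block.
 */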

#define REFILE_NOTEMPTY 0
#define REFILE_ANYWAY   1

static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
	jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);

	/* File the existing block on the bad_used_list.... */
	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else /* Not sure this should ever happen... need more coffee */
		list_del(&jeb->list);
	if (jeb->first_node) {
		jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		BUG_ON(allow_empty == REFILE_NOTEMPTY);
		/* It has to have had some nodes or we couldn't be here */
		jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
		uint32_t oldfree = jeb->free_size;

		jffs2_link_node_ref(c, jeb,
				    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
				    oldfree, NULL);
		/* convert to wasted */
		c->wasted_size += oldfree;
		jeb->wasted_size += oldfree;
		c->dirty_size -= oldfree;
		jeb->dirty_size -= oldfree;
	}

	jffs2_dbg_dump_block_lists_nolock(c);
	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}

static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
							    struct jffs2_inode_info *f,
							    struct jffs2_raw_node_ref *raw,
							    union jffs2_node_union *node)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dirent *fd;

	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));

	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
	       je16_to_cpu(node->u.magic) != 0);

	switch (je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE:
		if (f->metadata && f->metadata->raw == raw) {
			dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
			return &f->metadata->raw;
		}
		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
		BUG_ON(!frag);
		/* Find a frag which refers to the full_dnode we want to modify */
		while (!frag->node || frag->node->raw != raw) {
			frag = frag_next(frag);
			BUG_ON(!frag);
		}
		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
		return &frag->node->raw;

	case JFFS2_NODETYPE_DIRENT:
		for (fd = f->dents; fd; fd = fd->next) {
			if (fd->raw == raw) {
				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
				return &fd->raw;
			}
		}
		BUG();

	default:
		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
			    je16_to_cpu(node->u.nodetype));
		break;
	}
	return NULL;
}
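
/*
 * The function above returns the address of the in-core ->raw pointer that
 * still references the failed node (either &f->metadata->raw,
 * &frag->node->raw or &fd->raw) so that jffs2_wbuf_recover() can repoint it
 * at the replacement jffs2_raw_node_ref once the data has been rewritten
 * elsewhere.
 */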

#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
			      uint32_t ofs)
{
	int ret;
	size_t retlen;
	char *eccstr;

	ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
	if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
		pr_warn("%s(): Read back of page at %08x failed: %d\n",
			__func__, c->wbuf_ofs, ret);
		return ret;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n",
			__func__, ofs, retlen, c->wbuf_pagesize);
		return -EIO;
	}
	if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
		return 0;

	if (ret == -EUCLEAN)
		eccstr = "corrected";
	else if (ret == -EBADMSG)
		eccstr = "correction failed";
	else
		eccstr = "OK or unused";

	pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n",
		eccstr, c->wbuf_ofs);
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf, c->wbuf_pagesize, 0);

	pr_warn("Read back:\n");
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf_verify, c->wbuf_pagesize, 0);

	return -EIO;
}
#else
#define jffs2_verify_write(c,b,o) (0)
#endif

/* Recover from failure to write wbuf. Recover the nodes up to the
 * wbuf, not the one which we were starting to try to write. */

static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *jeb, *new_jeb;
	struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
	size_t retlen;
	int ret;
	int nr_refile = 0;
	unsigned char *buf;
	uint32_t start, end, ofs, len;

	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	if (c->wbuf_ofs % c->mtd->erasesize)
		jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
	else
		jffs2_block_refile(c, jeb, REFILE_ANYWAY);
	spin_unlock(&c->erase_completion_lock);

	BUG_ON(!ref_obsolete(jeb->last_node));

	/* Find the first node to be recovered, by skipping over every
	   node which ends before the wbuf starts, or which is obsolete. */
	for (next = raw = jeb->first_node; next; raw = next) {
		next = ref_next(raw);

		if (ref_obsolete(raw) ||
		    (next && ref_offset(next) <= c->wbuf_ofs)) {
			dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
				    ref_offset(raw), ref_flags(raw),
				    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
				    c->wbuf_ofs);
			continue;
		}
		dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
			    ref_offset(raw), ref_flags(raw),
			    (ref_offset(raw) + ref_totlen(c, jeb, raw)));

		first_raw = raw;
		break;
	}

	if (!first_raw) {
		/* All nodes were obsolete. Nothing to recover. */
		jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
		c->wbuf_len = 0;
		return;
	}

	start = ref_offset(first_raw);
	end = ref_offset(jeb->last_node);
	nr_refile = 1;

	/* Count the number of refs which need to be copied */
	while ((raw = ref_next(raw)) != jeb->last_node)
		nr_refile++;

	dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
		    start, end, end - start, nr_refile);

	buf = NULL;
	if (start < c->wbuf_ofs) {
		/* First affected node was already partially written.
		 * Attempt to reread the old data into our buffer. */

		buf = kmalloc(end - start, GFP_KERNEL);
		if (!buf) {
			pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n");

			goto read_failed;
		}

		/* Do the read... */
		ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen,
			       buf);

		/* ECC recovered ? */
		if ((ret == -EUCLEAN || ret == -EBADMSG) &&
		    (retlen == c->wbuf_ofs - start))
			ret = 0;

		if (ret || retlen != c->wbuf_ofs - start) {
			pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n");

			kfree(buf);
			buf = NULL;
		read_failed:
			first_raw = ref_next(first_raw);
			nr_refile--;
			while (first_raw && ref_obsolete(first_raw)) {
				first_raw = ref_next(first_raw);
				nr_refile--;
			}

			/* If this was the only node to be recovered, give up */
			if (!first_raw) {
				c->wbuf_len = 0;
				return;
			}

			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
			start = ref_offset(first_raw);
			dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
				    start, end, end - start, nr_refile);

		} else {
			/* Read succeeded. Copy the remaining data from the wbuf */
			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
		}
	}
	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
	   Either 'buf' contains the data, or we find it in the wbuf */

	/* ... and get an allocation of space from a shiny new block instead */
	ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret) {
		pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	/* The summary is not recovered, so it must be disabled for this erase block */
	jffs2_sum_disable_collecting(c->summary);

	ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
	if (ret) {
		pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	ofs = write_ofs(c);

	if (end-start >= c->wbuf_pagesize) {
		/* Need to do another write immediately, but it's possible
		   that this is just because the wbuf itself is completely
		   full, and there's nothing earlier read back from the
		   flash. Hence 'buf' isn't necessarily what we're writing
		   from. */
		unsigned char *rewrite_buf = buf?:c->wbuf;
		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);

		jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
			  towrite, ofs);

#ifdef BREAKMEHEADER
		static int breakme;
		if (breakme++ == 20) {
			pr_notice("Faking write error at 0x%08x\n", ofs);
			breakme = 0;
			mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
			ret = -EIO;
		} else
#endif
			ret = mtd_write(c->mtd, ofs, towrite, &retlen,
					rewrite_buf);

		if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
			/* Argh. We tried. Really we did. */
			pr_crit("Recovery of wbuf failed due to a second write error\n");
			kfree(buf);

			if (retlen)
				jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);

			return;
		}
		pr_notice("Recovery of wbuf succeeded to %08x\n", ofs);

		c->wbuf_len = (end - start) - towrite;
		c->wbuf_ofs = ofs + towrite;
		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
	} else {
		/* OK, now we're left with the dregs in whichever buffer we're using */
		if (buf) {
			memcpy(c->wbuf, buf, end-start);
		} else {
			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
		}
		c->wbuf_ofs = ofs;
		c->wbuf_len = end - start;
	}

	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
	new_jeb = &c->blocks[ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
		uint32_t rawlen = ref_totlen(c, jeb, raw);
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref *new_ref;
		struct jffs2_raw_node_ref **adjust_ref = NULL;
		struct jffs2_inode_info *f = NULL;

		jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
			  rawlen, ref_offset(raw), ref_flags(raw), ofs);

		ic = jffs2_raw_ref_to_ic(raw);

		/* Ick. This XATTR mess should be fixed shortly... */
		if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
			struct jffs2_xattr_datum *xd = (void *)ic;
			BUG_ON(xd->node != raw);
			adjust_ref = &xd->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
			struct jffs2_xattr_datum *xr = (void *)ic;
			BUG_ON(xr->node != raw);
			adjust_ref = &xr->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
			struct jffs2_raw_node_ref **p = &ic->nodes;

			/* Remove the old node from the per-inode list */
			while (*p && *p != (void *)ic) {
				if (*p == raw) {
					(*p) = (raw->next_in_ino);
					raw->next_in_ino = NULL;
					break;
				}
				p = &((*p)->next_in_ino);
			}

			if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
				/* If it's an in-core inode, then we have to adjust any
				   full_dirent or full_dnode structure to point to the
				   new version instead of the old */
				f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink);
				if (IS_ERR(f)) {
					/* Should never happen; it _must_ be present */
					JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
						    ic->ino, PTR_ERR(f));
					BUG();
				}
				/* We don't lock f->sem. There's a number of ways we could
				   end up in here with it already being locked, and nobody's
				   going to modify it on us anyway because we hold the
				   alloc_sem. We're only changing one ->raw pointer too,
				   which we can get away with without upsetting readers. */
				adjust_ref = jffs2_incore_replace_raw(c, f, raw,
								      (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
			} else if (unlikely(ic->state != INO_STATE_PRESENT &&
					    ic->state != INO_STATE_CHECKEDABSENT &&
					    ic->state != INO_STATE_GC)) {
				JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
				BUG();
			}
		}

		new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);

		if (adjust_ref) {
			BUG_ON(*adjust_ref != raw);
			*adjust_ref = new_ref;
		}
		if (f)
			jffs2_gc_release_inode(c, f);

		if (!ref_obsolete(raw)) {
			jeb->dirty_size += rawlen;
			jeb->used_size  -= rawlen;
			c->dirty_size += rawlen;
			c->used_size -= rawlen;
			raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
			BUG_ON(raw->next_in_ino);
		}
		ofs += rawlen;
	}

	kfree(buf);

	/* Fix up the original jeb now it's on the bad_list */
	if (first_raw == jeb->first_node) {
		jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
			  jeb->offset);
		list_move(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);

	spin_unlock(&c->erase_completion_lock);

	jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
		  c->wbuf_ofs, c->wbuf_len);

}

/* Meaning of pad argument:
   0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
   1: Pad, do not adjust nextblock free_size
   2: Pad, adjust nextblock free_size
*/
#define NOPAD		0
#define PAD_NOACCOUNT	1
#define PAD_ACCOUNTING	2
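
/*
 * Worked example for the padded flush below (hypothetical 2048-byte NAND
 * page): if c->wbuf_len is 900 when __jffs2_flush_wbuf() is called with
 * padding, the remaining 1148 bytes of the page are zeroed and, since there
 * is room for a node header, a JFFS2_NODETYPE_PADDING node with totlen 1148
 * is placed at offset 900 so that a later scan skips the padding cleanly.
 */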

static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
	struct jffs2_eraseblock *wbuf_jeb;
	int ret;
	size_t retlen;

	/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
	   del_timer() the timer we never initialised. */
	if (!jffs2_is_writebuffered(c))
		return 0;

	if (!mutex_is_locked(&c->alloc_sem)) {
		pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n");
		BUG();
	}

	if (!c->wbuf_len)	/* already checked c->wbuf above */
		return 0;

	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
		return -ENOMEM;

	/* claim remaining space on the page
	   this happens, if we have a change to a new block,
	   or if fsync forces us to flush the writebuffer.
	   if we have a switch to next page, we will not have
	   enough remaining space for this.
	*/
	if (pad ) {
		c->wbuf_len = PAD(c->wbuf_len);

		/* Pad with JFFS2_DIRTY_BITMASK initially.  this helps out ECC'd NOR
		   with 8 byte page size */
		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

		if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
		}
	}
	/* else jffs2_flash_writev has actually filled in the rest of the
	   buffer for us, and will deal with the node refs etc. later. */

#ifdef BREAKME
	static int breakme;
	if (breakme++ == 20) {
		pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs);
		breakme = 0;
		mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
			  brokenbuf);
		ret = -EIO;
	} else
#endif

		ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
				&retlen, c->wbuf);

	if (ret) {
		pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret);
		goto wfail;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
			retlen, c->wbuf_pagesize);
		ret = -EIO;
		goto wfail;
	} else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
	wfail:
		jffs2_wbuf_recover(c);

		return ret;
	}

	/* Adjust free size of the block if we padded. */
	if (pad) {
		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;

		jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
			  (wbuf_jeb == c->nextblock) ? "next" : "",
			  wbuf_jeb->offset);

		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
		   padded. If there is less free space in the block than that,
		   something screwed up */
		if (wbuf_jeb->free_size < waste) {
			pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
				c->wbuf_ofs, c->wbuf_len, waste);
			pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
				wbuf_jeb->offset, wbuf_jeb->free_size);
			BUG();
		}

		spin_lock(&c->erase_completion_lock);

		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
		/* FIXME: that made it count as dirty. Convert to wasted */
		wbuf_jeb->dirty_size -= waste;
		c->dirty_size -= waste;
		wbuf_jeb->wasted_size += waste;
		c->wasted_size += waste;
	} else
		spin_lock(&c->erase_completion_lock);

	/* Stick any now-obsoleted blocks on the erase_pending_list */
	jffs2_refile_wbuf_blocks(c);
	jffs2_clear_wbuf_ino_list(c);
	spin_unlock(&c->erase_completion_lock);

	memset(c->wbuf,0xff,c->wbuf_pagesize);
	/* adjust write buffer offset, else we get a non contiguous write bug */
	c->wbuf_ofs += c->wbuf_pagesize;
	c->wbuf_len = 0;
	return 0;
}

/* Trigger garbage collection to flush the write-buffer.
   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
   outstanding. If ino arg non-zero, do it only if a write for the
   given inode is outstanding. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);

	if (!c->wbuf)
		return 0;

	mutex_lock(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
		mutex_unlock(&c->alloc_sem);
		return 0;
	}

	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while */
		jffs2_dbg(1, "%s(): padding. Not finished checking\n",
			  __func__);
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry flushing wbuf in case jffs2_wbuf_recover
		   left some data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {

		mutex_unlock(&c->alloc_sem);

		jffs2_dbg(1, "%s(): calls gc pass\n", __func__);

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			mutex_lock(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry flushing wbuf in case jffs2_wbuf_recover
			   left some data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		mutex_lock(&c->alloc_sem);
	}

	jffs2_dbg(1, "%s(): ends...\n", __func__);

	mutex_unlock(&c->alloc_sem);
	return ret;
}

/* Pad write-buffer to end and write it, wasting space. */
int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
{
	int ret;

	if (!c->wbuf)
		return 0;

	down_write(&c->wbuf_sem);
	ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	/* retry - maybe wbuf recover left some data in wbuf. */
	if (ret)
		ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	up_write(&c->wbuf_sem);

	return ret;
}

static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
			      size_t len)
{
	if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
		return 0;

	if (len > (c->wbuf_pagesize - c->wbuf_len))
		len = c->wbuf_pagesize - c->wbuf_len;
	memcpy(c->wbuf + c->wbuf_len, buf, len);
	c->wbuf_len += (uint32_t) len;
	return len;
}
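
/*
 * Behaviour sketch (hypothetical 512-byte wbuf_pagesize): with an empty
 * write buffer, a 1024-byte request returns 0 so the caller writes those
 * pages to flash directly; with 100 bytes already buffered, a 600-byte
 * request copies only 412 bytes (up to the page boundary) and returns 412,
 * leaving the caller to flush and then continue with the remainder.
 */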

int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
		       unsigned long count, loff_t to, size_t *retlen,
		       uint32_t ino)
{
	struct jffs2_eraseblock *jeb;
	size_t wbuf_retlen, donelen = 0;
	uint32_t outvec_to = to;
	int ret, invec;

	/* If not writebuffered flash, don't bother */
	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);

	down_write(&c->wbuf_sem);

	/* If wbuf_ofs is not initialized, set it to target address */
	if (c->wbuf_ofs == 0xFFFFFFFF) {
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
		memset(c->wbuf,0xff,c->wbuf_pagesize);
	}

	/*
	 * Sanity checks on target address.  It's permitted to write
	 * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
	 * write at the beginning of a new erase block. Anything else,
	 * and you die.  New block starts at xxx000c (0-b = block
	 * header)
	 */
	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
		/* It's a write to a new block */
		if (c->wbuf_len) {
			jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
				  __func__, (unsigned long)to, c->wbuf_ofs);
			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
			if (ret)
				goto outerr;
		}
		/* set pointer to new block */
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
	}

	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
		/* We're not writing immediately after the writebuffer. Bad. */
		pr_crit("%s(): Non-contiguous write to %08lx\n",
			__func__, (unsigned long)to);
		if (c->wbuf_len)
			pr_crit("wbuf was previously %08x-%08x\n",
				c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len);
		BUG();
	}

	/* adjust alignment offset */
	if (c->wbuf_len != PAGE_MOD(to)) {
		c->wbuf_len = PAGE_MOD(to);
		/* take care of alignment to next page */
		if (!c->wbuf_len) {
			c->wbuf_len = c->wbuf_pagesize;
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}
	}

	for (invec = 0; invec < count; invec++) {
		int vlen = invecs[invec].iov_len;
		uint8_t *v = invecs[invec].iov_base;

		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);

		if (c->wbuf_len == c->wbuf_pagesize) {
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}
		vlen -= wbuf_retlen;
		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
		v += wbuf_retlen;

		if (vlen >= c->wbuf_pagesize) {
			ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen),
					&wbuf_retlen, v);
			if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
				goto outfile;

			vlen -= wbuf_retlen;
			outvec_to += wbuf_retlen;
			c->wbuf_ofs = outvec_to;
			donelen += wbuf_retlen;
			v += wbuf_retlen;
		}

		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
		if (c->wbuf_len == c->wbuf_pagesize) {
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}

		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
	}

	/*
	 * If there's a remainder in the wbuf and it's a non-GC write,
	 * remember that the wbuf affects this ino
	 */
	*retlen = donelen;

	if (jffs2_sum_active()) {
		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
		if (res)
			return res;
	}

	if (c->wbuf_len && ino)
		jffs2_wbuf_dirties_inode(c, ino);

	ret = 0;
	up_write(&c->wbuf_sem);
	return ret;

outfile:
	/*
	 * At this point we have no problem, c->wbuf is empty. However
	 * refile nextblock to avoid writing again to same address.
	 */

	spin_lock(&c->erase_completion_lock);

	jeb = &c->blocks[outvec_to / c->sector_size];
	jffs2_block_refile(c, jeb, REFILE_ANYWAY);

	spin_unlock(&c->erase_completion_lock);

outerr:
	*retlen = 0;
	up_write(&c->wbuf_sem);
	return ret;
}
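
/*
 * Example flow through jffs2_flash_writev() (hypothetical numbers): writing
 * a 1500-byte node with a 512-byte wbuf_pagesize and 200 bytes already
 * buffered, 312 bytes top up the current page, which is then flushed; the
 * next 1024 bytes (two whole pages) go straight to mtd_write(); the trailing
 * 164 bytes stay in c->wbuf and, for a non-GC write, the inode is recorded
 * via jffs2_wbuf_dirties_inode().
 */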

/*
 *	This is the entry for flash write.
 *	Check, if we work on NAND FLASH, if so build a kvec and write it via writev
*/
int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
		      size_t *retlen, const u_char *buf)
{
	struct kvec vecs[1];

	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_write(c, ofs, len, retlen, buf);

	vecs[0].iov_base = (unsigned char *) buf;
	vecs[0].iov_len = len;
	return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
}

/*
	Handle readback from writebuffer and ECC failure return
*/
int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
{
	loff_t	orbf = 0, owbf = 0, lwbf = 0;
	int	ret;

	if (!jffs2_is_writebuffered(c))
		return mtd_read(c->mtd, ofs, len, retlen, buf);

	/* Read flash */
	down_read(&c->wbuf_sem);
	ret = mtd_read(c->mtd, ofs, len, retlen, buf);

	if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
		if (ret == -EBADMSG)
			pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
				len, ofs);
		/*
		 * We have the raw data without ECC correction in the buffer,
		 * maybe we are lucky and all data or parts are correct. We
		 * check the node.  If data are corrupted node check will sort
		 * it out.  We keep this block, it will fail on write or erase
		 * and then we mark it bad. Or should we do that now? But we
		 * should give it a chance.  Maybe we had a system crash or
		 * power loss before the ecc write or an erase was completed.
		 * So we return success. :)
		 */
		ret = 0;
	}

	/* if no writebuffer available or write buffer empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read in a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;

	if (ofs >= c->wbuf_ofs) {
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* is write beyond write buffer ? */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	if (lwbf > 0)
		memcpy(buf+orbf,c->wbuf+owbf,lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}
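
/*
 * Overlay example for jffs2_flash_read() (hypothetical numbers): with
 * c->wbuf_ofs at 0x2200 and c->wbuf_len 0x80, a read of 0x100 bytes from
 * 0x2180 in the same eraseblock gives orbf = 0x80 and lwbf = 0x80, so the
 * first half of the caller's buffer keeps the data read from flash and the
 * second half is overwritten with the newer contents of c->wbuf.
 */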
10151da177e4SLinus Torvalds 
10168593fbc6SThomas Gleixner #define NR_OOB_SCAN_PAGES 4
10178593fbc6SThomas Gleixner 
101809b3fba5SDavid Woodhouse /* For historical reasons we use only 8 bytes for OOB clean marker */
101909b3fba5SDavid Woodhouse #define OOB_CM_SIZE 8
1020a7a6ace1SArtem Bityutskiy 
1021a7a6ace1SArtem Bityutskiy static const struct jffs2_unknown_node oob_cleanmarker =
1022a7a6ace1SArtem Bityutskiy {
1023566865a2SDavid Woodhouse 	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
1024566865a2SDavid Woodhouse 	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
1025566865a2SDavid Woodhouse 	.totlen = constant_cpu_to_je32(8)
1026a7a6ace1SArtem Bityutskiy };
1027a7a6ace1SArtem Bityutskiy 
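/*
 * Editor's note (not part of the original file): on the medium the 8-byte
 * cleanmarker above consists of the magic bitmask 0x1985, the nodetype
 * 0x2003 and a 32-bit total length of 8.  The je16/je32 fields follow the
 * byte order selected at build time (native endian by default), so on a
 * little-endian build the autoplaced OOB bytes read back as:
 *
 *	85 19  03 20  08 00 00 00
 *	magic  type   totlen
 *
 * Only the first min(c->oobavail, OOB_CM_SIZE) bytes are written and
 * compared by the helpers below.
 */
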
10281da177e4SLinus Torvalds /*
1029a7a6ace1SArtem Bityutskiy  * Check whether the out-of-band area is empty. This function knows about the
1030a7a6ace1SArtem Bityutskiy  * cleanmarker; if one is present in the OOB, the OOB still counts as empty.
10311da177e4SLinus Torvalds  */
10328593fbc6SThomas Gleixner int jffs2_check_oob_empty(struct jffs2_sb_info *c,
10338593fbc6SThomas Gleixner 			  struct jffs2_eraseblock *jeb, int mode)
10341da177e4SLinus Torvalds {
1035a7a6ace1SArtem Bityutskiy 	int i, ret;
1036a7a6ace1SArtem Bityutskiy 	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
10378593fbc6SThomas Gleixner 	struct mtd_oob_ops ops;
10381da177e4SLinus Torvalds 
10390612b9ddSBrian Norris 	ops.mode = MTD_OPS_AUTO_OOB;
1040a7a6ace1SArtem Bityutskiy 	ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
10418593fbc6SThomas Gleixner 	ops.oobbuf = c->oobbuf;
1042a7a6ace1SArtem Bityutskiy 	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
10438593fbc6SThomas Gleixner 	ops.datbuf = NULL;
10448593fbc6SThomas Gleixner 
1045fd2819bbSArtem Bityutskiy 	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
1046a7a6ace1SArtem Bityutskiy 	if (ret || ops.oobretlen != ops.ooblen) {
1047da320f05SJoe Perches 		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
10487be26bfbSAndrew Morton 		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
1049a7a6ace1SArtem Bityutskiy 		if (!ret)
1050a7a6ace1SArtem Bityutskiy 			ret = -EIO;
10518593fbc6SThomas Gleixner 		return ret;
10521da177e4SLinus Torvalds 	}
10531da177e4SLinus Torvalds 
1054a7a6ace1SArtem Bityutskiy 	for (i = 0; i < ops.ooblen; i++) {
1055a7a6ace1SArtem Bityutskiy 		if (mode && i < cmlen)
1056a7a6ace1SArtem Bityutskiy 			/* Yeah, we know about the cleanmarker */
10571da177e4SLinus Torvalds 			continue;
10581da177e4SLinus Torvalds 
10598593fbc6SThomas Gleixner 		if (ops.oobbuf[i] != 0xFF) {
10609c261b33SJoe Perches 			jffs2_dbg(2, "Found %02x at %x in OOB for "
10619c261b33SJoe Perches 				  "%08x\n", ops.oobbuf[i], i, jeb->offset);
10628593fbc6SThomas Gleixner 			return 1;
10631da177e4SLinus Torvalds 		}
10641da177e4SLinus Torvalds 	}
10651da177e4SLinus Torvalds 
10668593fbc6SThomas Gleixner 	return 0;
10671da177e4SLinus Torvalds }
10681da177e4SLinus Torvalds 
10691da177e4SLinus Torvalds /*
1070a7a6ace1SArtem Bityutskiy  * Check for a valid cleanmarker.
1071a7a6ace1SArtem Bityutskiy  * Returns: 0 if a valid cleanmarker was found
1072a7a6ace1SArtem Bityutskiy  *	    1 if no cleanmarker was found
1073a7a6ace1SArtem Bityutskiy  *	    negative error code if an error occurred
10741da177e4SLinus Torvalds  */
10758593fbc6SThomas Gleixner int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
10768593fbc6SThomas Gleixner 				 struct jffs2_eraseblock *jeb)
10771da177e4SLinus Torvalds {
10788593fbc6SThomas Gleixner 	struct mtd_oob_ops ops;
1079a7a6ace1SArtem Bityutskiy 	int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
10801da177e4SLinus Torvalds 
10810612b9ddSBrian Norris 	ops.mode = MTD_OPS_AUTO_OOB;
1082a7a6ace1SArtem Bityutskiy 	ops.ooblen = cmlen;
10838593fbc6SThomas Gleixner 	ops.oobbuf = c->oobbuf;
1084a7a6ace1SArtem Bityutskiy 	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
10858593fbc6SThomas Gleixner 	ops.datbuf = NULL;
10868593fbc6SThomas Gleixner 
1087fd2819bbSArtem Bityutskiy 	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
1088a7a6ace1SArtem Bityutskiy 	if (ret || ops.oobretlen != ops.ooblen) {
1089da320f05SJoe Perches 		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
10907be26bfbSAndrew Morton 		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
1091a7a6ace1SArtem Bityutskiy 		if (!ret)
1092a7a6ace1SArtem Bityutskiy 			ret = -EIO;
10931da177e4SLinus Torvalds 		return ret;
10941da177e4SLinus Torvalds 	}
10958593fbc6SThomas Gleixner 
1096a7a6ace1SArtem Bityutskiy 	return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
10971da177e4SLinus Torvalds }
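
/*
 * Editor's sketch (not part of the original file): how the tri-state
 * return value documented above is typically consumed.  The caller below
 * is hypothetical and only illustrates the contract; the real decisions
 * live in the scan and erase code.  Guarded by #if 0; illustration only.
 */
#if 0
static void example_classify_block(struct jffs2_sb_info *c,
				   struct jffs2_eraseblock *jeb)
{
	int ret = jffs2_check_nand_cleanmarker(c, jeb);

	if (ret < 0)
		pr_warn("OOB read failed for EB at %08x: %d\n", jeb->offset, ret);
	else if (ret == 1)
		pr_info("no cleanmarker at %08x, block needs erasing\n", jeb->offset);
	else
		pr_info("valid cleanmarker at %08x, block is usable\n", jeb->offset);
}
#endif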
10981da177e4SLinus Torvalds 
10998593fbc6SThomas Gleixner int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
11008593fbc6SThomas Gleixner 				 struct jffs2_eraseblock *jeb)
11011da177e4SLinus Torvalds {
11021da177e4SLinus Torvalds 	int ret;
11038593fbc6SThomas Gleixner 	struct mtd_oob_ops ops;
1104a7a6ace1SArtem Bityutskiy 	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
11051da177e4SLinus Torvalds 
11060612b9ddSBrian Norris 	ops.mode = MTD_OPS_AUTO_OOB;
1107a7a6ace1SArtem Bityutskiy 	ops.ooblen = cmlen;
1108a7a6ace1SArtem Bityutskiy 	ops.oobbuf = (uint8_t *)&oob_cleanmarker;
1109a7a6ace1SArtem Bityutskiy 	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
11108593fbc6SThomas Gleixner 	ops.datbuf = NULL;
11118593fbc6SThomas Gleixner 
1112a2cc5ba0SArtem Bityutskiy 	ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
1113a7a6ace1SArtem Bityutskiy 	if (ret || ops.oobretlen != ops.ooblen) {
1114da320f05SJoe Perches 		pr_err("cannot write OOB for EB at %08x, requested %zd bytes, wrote %zd bytes, error %d\n",
11157be26bfbSAndrew Morton 		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
1116a7a6ace1SArtem Bityutskiy 		if (!ret)
1117a7a6ace1SArtem Bityutskiy 			ret = -EIO;
11181da177e4SLinus Torvalds 		return ret;
11191da177e4SLinus Torvalds 	}
1120a7a6ace1SArtem Bityutskiy 
11211da177e4SLinus Torvalds 	return 0;
11221da177e4SLinus Torvalds }
11231da177e4SLinus Torvalds 
11241da177e4SLinus Torvalds /*
11251da177e4SLinus Torvalds  * On NAND we try to mark this block bad.  Once erasing it has failed
112625985edcSLucas De Marchi  * MAX_ERASE_FAILURES times, we finally mark it bad.
11271da177e4SLinus Torvalds  * Don't care about failures. This block remains on the erase-pending
11281da177e4SLinus Torvalds  * or badblock list as long as nobody manipulates the flash with
11291da177e4SLinus Torvalds  * a bootloader or something like that.
11301da177e4SLinus Torvalds  */
11311da177e4SLinus Torvalds 
11321da177e4SLinus Torvalds int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
11331da177e4SLinus Torvalds {
11341da177e4SLinus Torvalds 	int 	ret;
11351da177e4SLinus Torvalds 
11361da177e4SLinus Torvalds 	/* if the failure count is still below the maximum, keep the block for now */
11371da177e4SLinus Torvalds 	if (++jeb->bad_count < MAX_ERASE_FAILURES)
11381da177e4SLinus Torvalds 		return 0;
11391da177e4SLinus Torvalds 
11405a528957SJoe Perches 	pr_warn("marking eraseblock at %08x as bad\n", bad_offset);
11415942ddbcSArtem Bityutskiy 	ret = mtd_block_markbad(c->mtd, bad_offset);
11421da177e4SLinus Torvalds 
11431da177e4SLinus Torvalds 	if (ret) {
11449c261b33SJoe Perches 		jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
11459c261b33SJoe Perches 			  __func__, jeb->offset, ret);
11461da177e4SLinus Torvalds 		return ret;
11471da177e4SLinus Torvalds 	}
11481da177e4SLinus Torvalds 	return 1;
11491da177e4SLinus Torvalds }
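
/*
 * Editor's sketch (not part of the original file): the return contract of
 * jffs2_write_nand_badblock() as seen from a hypothetical caller in the
 * erase path: 0 means the failure was counted and the block is kept for
 * another try, 1 means the block was marked bad, and a negative value
 * means mtd_block_markbad() itself failed.  Guarded by #if 0; illustration
 * only.
 */
#if 0
static void example_handle_erase_failure(struct jffs2_sb_info *c,
					 struct jffs2_eraseblock *jeb,
					 uint32_t bad_offset)
{
	int ret = jffs2_write_nand_badblock(c, jeb, bad_offset);

	if (ret > 0)
		pr_info("EB at %08x retired as bad\n", jeb->offset);
	else if (ret == 0)
		pr_info("EB at %08x kept after %d erase failure(s)\n",
			jeb->offset, jeb->bad_count);
	else
		pr_warn("could not mark EB at %08x bad: %d\n", jeb->offset, ret);
}
#endif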
11501da177e4SLinus Torvalds 
1151a7a6ace1SArtem Bityutskiy int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
11521da177e4SLinus Torvalds {
11535bd34c09SThomas Gleixner 	struct nand_ecclayout *oinfo = c->mtd->ecclayout;
11541da177e4SLinus Torvalds 
11551da177e4SLinus Torvalds 	if (!c->mtd->oobsize)
11561da177e4SLinus Torvalds 		return 0;
11571da177e4SLinus Torvalds 
11581da177e4SLinus Torvalds 	/* Cleanmarker is out-of-band, so inline size zero */
11591da177e4SLinus Torvalds 	c->cleanmarker_size = 0;
11601da177e4SLinus Torvalds 
1161a7a6ace1SArtem Bityutskiy 	if (!oinfo || oinfo->oobavail == 0) {
1162da320f05SJoe Perches 		pr_err("inconsistent device description\n");
11631da177e4SLinus Torvalds 		return -EINVAL;
11641da177e4SLinus Torvalds 	}
11655bd34c09SThomas Gleixner 
11665a528957SJoe Perches 	jffs2_dbg(1, "using OOB on NAND\n");
11675bd34c09SThomas Gleixner 
1168a7a6ace1SArtem Bityutskiy 	c->oobavail = oinfo->oobavail;
11691da177e4SLinus Torvalds 
11701da177e4SLinus Torvalds 	/* Initialise write buffer */
11711da177e4SLinus Torvalds 	init_rwsem(&c->wbuf_sem);
117228318776SJoern Engel 	c->wbuf_pagesize = c->mtd->writesize;
11731da177e4SLinus Torvalds 	c->wbuf_ofs = 0xFFFFFFFF;
11741da177e4SLinus Torvalds 
11751da177e4SLinus Torvalds 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
11761da177e4SLinus Torvalds 	if (!c->wbuf)
11771da177e4SLinus Torvalds 		return -ENOMEM;
11781da177e4SLinus Torvalds 
1179a7a6ace1SArtem Bityutskiy 	c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->oobavail, GFP_KERNEL);
1180a7a6ace1SArtem Bityutskiy 	if (!c->oobbuf) {
11811da177e4SLinus Torvalds 		kfree(c->wbuf);
11821da177e4SLinus Torvalds 		return -ENOMEM;
11831da177e4SLinus Torvalds 	}
1184a7a6ace1SArtem Bityutskiy 
1185a6bc432eSDavid Woodhouse #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1186a6bc432eSDavid Woodhouse 	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1187a6bc432eSDavid Woodhouse 	if (!c->wbuf_verify) {
1188a6bc432eSDavid Woodhouse 		kfree(c->oobbuf);
1189a6bc432eSDavid Woodhouse 		kfree(c->wbuf);
1190a6bc432eSDavid Woodhouse 		return -ENOMEM;
1191a6bc432eSDavid Woodhouse 	}
1192a6bc432eSDavid Woodhouse #endif
1193a7a6ace1SArtem Bityutskiy 	return 0;
11941da177e4SLinus Torvalds }
11951da177e4SLinus Torvalds 
11961da177e4SLinus Torvalds void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
11971da177e4SLinus Torvalds {
1198a6bc432eSDavid Woodhouse #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1199a6bc432eSDavid Woodhouse 	kfree(c->wbuf_verify);
1200a6bc432eSDavid Woodhouse #endif
12011da177e4SLinus Torvalds 	kfree(c->wbuf);
12028593fbc6SThomas Gleixner 	kfree(c->oobbuf);
12031da177e4SLinus Torvalds }
12041da177e4SLinus Torvalds 
12058f15fd55SAndrew Victor int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
12068f15fd55SAndrew Victor 	c->cleanmarker_size = 0;		/* No cleanmarkers needed */
12078f15fd55SAndrew Victor 
12088f15fd55SAndrew Victor 	/* Initialize write buffer */
12098f15fd55SAndrew Victor 	init_rwsem(&c->wbuf_sem);
12108f15fd55SAndrew Victor 
1211daba5cc4SArtem B. Bityutskiy 
1212daba5cc4SArtem B. Bityutskiy 	c->wbuf_pagesize =  c->mtd->erasesize;
1213daba5cc4SArtem B. Bityutskiy 
1214daba5cc4SArtem B. Bityutskiy 	/* Find a suitable c->sector_size
1215daba5cc4SArtem B. Bityutskiy 	 * - not too many sectors
1216daba5cc4SArtem B. Bityutskiy 	 * - sectors have to be at least 4 KiB plus some bytes
1217daba5cc4SArtem B. Bityutskiy 	 * - all known DataFlash chips have erase sizes of 528 or 1056 bytes
1218daba5cc4SArtem B. Bityutskiy 	 * - we concatenate at least 8 eraseblocks and want at least 8 KiB
1219daba5cc4SArtem B. Bityutskiy 	 * - the concatenation should be a power of 2 (worked example follows this function)
1220daba5cc4SArtem B. Bityutskiy 	 */
1221daba5cc4SArtem B. Bityutskiy 
1222daba5cc4SArtem B. Bityutskiy 	c->sector_size = 8 * c->mtd->erasesize;
1223daba5cc4SArtem B. Bityutskiy 
1224daba5cc4SArtem B. Bityutskiy 	while (c->sector_size < 8192) {
1225daba5cc4SArtem B. Bityutskiy 		c->sector_size *= 2;
1226daba5cc4SArtem B. Bityutskiy 	}
1227daba5cc4SArtem B. Bityutskiy 
1228daba5cc4SArtem B. Bityutskiy 	/* It may be necessary to adjust the flash size */
1229daba5cc4SArtem B. Bityutskiy 	c->flash_size = c->mtd->size;
1230daba5cc4SArtem B. Bityutskiy 
1231daba5cc4SArtem B. Bityutskiy 	if ((c->flash_size % c->sector_size) != 0) {
1232daba5cc4SArtem B. Bityutskiy 		c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
12335a528957SJoe Perches 		pr_warn("flash size adjusted to %dKiB\n", c->flash_size / 1024);
1234daba5cc4SArtem B. Bityutskiy 	}
1235daba5cc4SArtem B. Bityutskiy 
1236daba5cc4SArtem B. Bityutskiy 	c->wbuf_ofs = 0xFFFFFFFF;
12378f15fd55SAndrew Victor 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
12388f15fd55SAndrew Victor 	if (!c->wbuf)
12398f15fd55SAndrew Victor 		return -ENOMEM;
12408f15fd55SAndrew Victor 
1241cca15841Smichael #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1242cca15841Smichael 	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1243cca15841Smichael 	if (!c->wbuf_verify) {
1244cca15841Smichael 		kfree(c->oobbuf);
1245cca15841Smichael 		kfree(c->wbuf);
1246cca15841Smichael 		return -ENOMEM;
1247cca15841Smichael 	}
1248cca15841Smichael #endif
1249cca15841Smichael 
12505a528957SJoe Perches 	pr_info("write-buffering enabled: buffer size %d, erase size %d\n",
1251da320f05SJoe Perches 		c->wbuf_pagesize, c->sector_size);
12528f15fd55SAndrew Victor 
12538f15fd55SAndrew Victor 	return 0;
12548f15fd55SAndrew Victor }
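
/*
 * Editor's note (not part of the original file): worked example of the
 * sector-size selection in jffs2_dataflash_setup() for the two DataFlash
 * erase sizes mentioned in the comment.  With a 528-byte erase size,
 * 8 * 528 = 4224 bytes, which the loop doubles once to 8448 bytes
 * (16 eraseblocks per virtual sector); with a 1056-byte erase size,
 * 8 * 1056 = 8448 bytes already meets the 8 KiB minimum, so 8 eraseblocks
 * are concatenated.  A flash size that is not a multiple of the resulting
 * virtual sector is then rounded down.
 */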
12558f15fd55SAndrew Victor 
12568f15fd55SAndrew Victor void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1257cca15841Smichael #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1258cca15841Smichael 	kfree(c->wbuf_verify);
1259cca15841Smichael #endif
12608f15fd55SAndrew Victor 	kfree(c->wbuf);
12618f15fd55SAndrew Victor }
12628f15fd55SAndrew Victor 
126359da721aSNicolas Pitre int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1264c8b229deSJoern Engel 	/* The cleanmarker currently occupies whole programming regions:
1265c8b229deSJoern Engel 	 * one region, or two on STMicro flashes with 8-byte regions. */
1266c8b229deSJoern Engel 	c->cleanmarker_size = max(16u, c->mtd->writesize);
126759da721aSNicolas Pitre 
126859da721aSNicolas Pitre 	/* Initialize write buffer */
126959da721aSNicolas Pitre 	init_rwsem(&c->wbuf_sem);
127028318776SJoern Engel 	c->wbuf_pagesize = c->mtd->writesize;
127159da721aSNicolas Pitre 	c->wbuf_ofs = 0xFFFFFFFF;
127259da721aSNicolas Pitre 
127359da721aSNicolas Pitre 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
127459da721aSNicolas Pitre 	if (!c->wbuf)
127559da721aSNicolas Pitre 		return -ENOMEM;
127659da721aSNicolas Pitre 
1277bc8cec0dSMassimo Cirillo #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1278bc8cec0dSMassimo Cirillo 	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1279bc8cec0dSMassimo Cirillo 	if (!c->wbuf_verify) {
1280bc8cec0dSMassimo Cirillo 		kfree(c->wbuf);
1281bc8cec0dSMassimo Cirillo 		return -ENOMEM;
1282bc8cec0dSMassimo Cirillo 	}
1283bc8cec0dSMassimo Cirillo #endif
128459da721aSNicolas Pitre 	return 0;
128559da721aSNicolas Pitre }
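
/*
 * Editor's note (not part of the original file): the max() in
 * jffs2_nor_wbuf_flash_setup() keeps the cleanmarker aligned to whole
 * programming regions.  For example, with 8-byte write regions the
 * cleanmarker is max(16, 8) = 16 bytes, i.e. two regions; with a buffered
 * NOR chip whose writesize is larger than 16 bytes it is exactly one
 * region of writesize bytes.
 */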
128659da721aSNicolas Pitre 
128759da721aSNicolas Pitre void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1288bc8cec0dSMassimo Cirillo #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1289bc8cec0dSMassimo Cirillo 	kfree(c->wbuf_verify);
1290bc8cec0dSMassimo Cirillo #endif
129159da721aSNicolas Pitre 	kfree(c->wbuf);
129259da721aSNicolas Pitre }
12930029da3bSArtem Bityutskiy 
12940029da3bSArtem Bityutskiy int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
12950029da3bSArtem Bityutskiy 	c->cleanmarker_size = 0;
12960029da3bSArtem Bityutskiy 
12970029da3bSArtem Bityutskiy 	if (c->mtd->writesize == 1)
12980029da3bSArtem Bityutskiy 		/* We do not need a write buffer */
12990029da3bSArtem Bityutskiy 		return 0;
13000029da3bSArtem Bityutskiy 
13010029da3bSArtem Bityutskiy 	init_rwsem(&c->wbuf_sem);
13020029da3bSArtem Bityutskiy 
13030029da3bSArtem Bityutskiy 	c->wbuf_pagesize =  c->mtd->writesize;
13040029da3bSArtem Bityutskiy 	c->wbuf_ofs = 0xFFFFFFFF;
13050029da3bSArtem Bityutskiy 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
13060029da3bSArtem Bityutskiy 	if (!c->wbuf)
13070029da3bSArtem Bityutskiy 		return -ENOMEM;
13080029da3bSArtem Bityutskiy 
13095a528957SJoe Perches 	pr_info("write-buffering enabled: buffer size %d, erase size %d\n",
1310da320f05SJoe Perches 		c->wbuf_pagesize, c->sector_size);
13110029da3bSArtem Bityutskiy 
13120029da3bSArtem Bityutskiy 	return 0;
13130029da3bSArtem Bityutskiy }
13140029da3bSArtem Bityutskiy 
13150029da3bSArtem Bityutskiy void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) {
13160029da3bSArtem Bityutskiy 	kfree(c->wbuf);
13170029da3bSArtem Bityutskiy }
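
/*
 * Editor's sketch (not part of the original file): how the per-flash-type
 * setup helpers in this file are chosen.  This is modelled on
 * jffs2_flash_setup() in fs.c, using the jffs2_cleanmarker_oob(),
 * jffs2_dataflash(), jffs2_ubivol() and jffs2_nor_wbuf_flash() helpers
 * from os-linux.h; the exact logic may differ between kernel versions.
 * Guarded by #if 0; illustration only.
 */
#if 0
static int example_flash_setup(struct jffs2_sb_info *c)
{
	if (jffs2_cleanmarker_oob(c))		/* NAND: cleanmarker lives in OOB */
		return jffs2_nand_flash_setup(c);
	if (jffs2_dataflash(c))			/* Atmel DataFlash */
		return jffs2_dataflash_setup(c);
	if (jffs2_ubivol(c))			/* JFFS2 on top of a UBI volume */
		return jffs2_ubivol_setup(c);
	if (jffs2_nor_wbuf_flash(c))		/* NOR flash with a write buffer */
		return jffs2_nor_wbuf_flash_setup(c);
	return 0;				/* no write-buffering needed */
}
#endif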
1318