xref: /openbmc/linux/fs/jffs2/wbuf.c (revision 9223a456)
1 /*
2  * JFFS2 -- Journalling Flash File System, Version 2.
3  *
4  * Copyright (C) 2001-2003 Red Hat, Inc.
5  * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
6  *
7  * Created by David Woodhouse <dwmw2@infradead.org>
8  * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
9  *
10  * For licensing information, see the file 'LICENCE' in this directory.
11  *
12  * $Id: wbuf.c,v 1.100 2005/09/30 13:59:13 dedekind Exp $
13  *
14  */
15 
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/crc32.h>
20 #include <linux/mtd/nand.h>
21 #include <linux/jiffies.h>
22 
23 #include "nodelist.h"
24 
25 /* For testing write failures */
26 #undef BREAKME
27 #undef BREAKMEHEADER
28 
29 #ifdef BREAKME
30 static unsigned char *brokenbuf;
31 #endif
32 
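/* Round an offset down to the start of its write-buffer page, or take its
   offset within that page. 'c' is the jffs2_sb_info in scope at each use. */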
33 #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
34 #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
35 
36 /* max. erase failures before we mark a block bad */
37 #define MAX_ERASE_FAILURES 	2
38 
39 struct jffs2_inodirty {
40 	uint32_t ino;
41 	struct jffs2_inodirty *next;
42 };
43 
44 static struct jffs2_inodirty inodirty_nomem;
45 
46 static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
47 {
48 	struct jffs2_inodirty *this = c->wbuf_inodes;
49 
50 	/* If a malloc failed, consider _everything_ dirty */
51 	if (this == &inodirty_nomem)
52 		return 1;
53 
54 	/* If ino == 0, _any_ non-GC writes mean 'yes' */
55 	if (this && !ino)
56 		return 1;
57 
58 	/* Look to see if the inode in question is pending in the wbuf */
59 	while (this) {
60 		if (this->ino == ino)
61 			return 1;
62 		this = this->next;
63 	}
64 	return 0;
65 }
66 
67 static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
68 {
69 	struct jffs2_inodirty *this;
70 
71 	this = c->wbuf_inodes;
72 
73 	if (this != &inodirty_nomem) {
74 		while (this) {
75 			struct jffs2_inodirty *next = this->next;
76 			kfree(this);
77 			this = next;
78 		}
79 	}
80 	c->wbuf_inodes = NULL;
81 }
82 
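/* Note that the write buffer now holds data belonging to the given inode, so
   that a later jffs2_flush_wbuf_gc() for that inode knows a flush is needed.
   On allocation failure we fall back to treating every inode as dirty. */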
83 static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
84 {
85 	struct jffs2_inodirty *new;
86 
87 	/* Mark the superblock dirty so that kupdated will flush... */
88 	jffs2_erase_pending_trigger(c);
89 
90 	if (jffs2_wbuf_pending_for_ino(c, ino))
91 		return;
92 
93 	new = kmalloc(sizeof(*new), GFP_KERNEL);
94 	if (!new) {
95 		D1(printk(KERN_DEBUG "No memory to allocate inodirty. Falling back to considering everything dirty\n"));
96 		jffs2_clear_wbuf_ino_list(c);
97 		c->wbuf_inodes = &inodirty_nomem;
98 		return;
99 	}
100 	new->ino = ino;
101 	new->next = c->wbuf_inodes;
102 	c->wbuf_inodes = new;
103 	return;
104 }
105 
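/* Once the write buffer has been flushed, blocks parked on the
   erasable_pending_wbuf_list may safely be erased: move most of them to the
   erase_pending_list, and occasionally to the erasable_list to spread the load. */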
106 static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
107 {
108 	struct list_head *this, *next;
109 	static int n;
110 
111 	if (list_empty(&c->erasable_pending_wbuf_list))
112 		return;
113 
114 	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
115 		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
116 
117 		D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
118 		list_del(this);
119 		if ((jiffies + (n++)) & 127) {
120 			/* Most of the time, we just erase it immediately, so that
121 			   we don't spend ages scanning it on mount, etc. */
122 			D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
123 			list_add_tail(&jeb->list, &c->erase_pending_list);
124 			c->nr_erasing_blocks++;
125 			jffs2_erase_pending_trigger(c);
126 		} else {
127 			/* Sometimes, however, we leave it elsewhere so it doesn't get
128 			   immediately reused, and we spread the load a bit. */
129 			D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
130 			list_add_tail(&jeb->list, &c->erasable_list);
131 		}
132 	}
133 }
134 
135 #define REFILE_NOTEMPTY 0
136 #define REFILE_ANYWAY   1
137 
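/* Take a block which has just suffered a write failure out of normal use:
   file it on the bad_used_list if it still holds valid nodes, otherwise
   queue it for erase, and account its remaining free space as wasted. */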
138 static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
139 {
140 	D1(printk("About to refile bad block at %08x\n", jeb->offset));
141 
142 	/* File the existing block on the bad_used_list.... */
143 	if (c->nextblock == jeb)
144 		c->nextblock = NULL;
145 	else /* Not sure this should ever happen... need more coffee */
146 		list_del(&jeb->list);
147 	if (jeb->first_node) {
148 		D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
149 		list_add(&jeb->list, &c->bad_used_list);
150 	} else {
151 		BUG_ON(allow_empty == REFILE_NOTEMPTY);
152 		/* It has to have had some nodes or we couldn't be here */
153 		D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
154 		list_add(&jeb->list, &c->erase_pending_list);
155 		c->nr_erasing_blocks++;
156 		jffs2_erase_pending_trigger(c);
157 	}
158 
159 	/* Adjust its size counts accordingly */
160 	c->wasted_size += jeb->free_size;
161 	c->free_size -= jeb->free_size;
162 	jeb->wasted_size += jeb->free_size;
163 	jeb->free_size = 0;
164 
165 	jffs2_dbg_dump_block_lists_nolock(c);
166 	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
167 	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
168 }
169 
170 /* Recover from failure to write wbuf. Recover the nodes up to the
171  * wbuf, not the one which we were starting to try to write. */
172 
173 static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
174 {
175 	struct jffs2_eraseblock *jeb, *new_jeb;
176 	struct jffs2_raw_node_ref **first_raw, **raw;
177 	size_t retlen;
178 	int ret;
179 	unsigned char *buf;
180 	uint32_t start, end, ofs, len;
181 
182 	spin_lock(&c->erase_completion_lock);
183 
184 	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
185 
186 	jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
187 
188 	/* Find the first node to be recovered, by skipping over every
189 	   node which ends before the wbuf starts, or which is obsolete. */
190 	first_raw = &jeb->first_node;
191 	while (*first_raw &&
192 	       (ref_obsolete(*first_raw) ||
193 		(ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) {
194 		D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
195 			  ref_offset(*first_raw), ref_flags(*first_raw),
196 			  (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)),
197 			  c->wbuf_ofs));
198 		first_raw = &(*first_raw)->next_phys;
199 	}
200 
201 	if (!*first_raw) {
202 		/* All nodes were obsolete. Nothing to recover. */
203 		D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
204 		spin_unlock(&c->erase_completion_lock);
205 		return;
206 	}
207 
208 	start = ref_offset(*first_raw);
209 	end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw);
210 
211 	/* Find the last node to be recovered */
212 	raw = first_raw;
213 	while ((*raw)) {
214 		if (!ref_obsolete(*raw))
215 			end = ref_offset(*raw) + ref_totlen(c, jeb, *raw);
216 
217 		raw = &(*raw)->next_phys;
218 	}
219 	spin_unlock(&c->erase_completion_lock);
220 
221 	D1(printk(KERN_DEBUG "wbuf recover %08x-%08x\n", start, end));
222 
223 	buf = NULL;
224 	if (start < c->wbuf_ofs) {
225 		/* First affected node was already partially written.
226 		 * Attempt to reread the old data into our buffer. */
227 
228 		buf = kmalloc(end - start, GFP_KERNEL);
229 		if (!buf) {
230 			printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");
231 
232 			goto read_failed;
233 		}
234 
235 		/* Do the read... */
236 		ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
237 
238 		if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) {
239 			/* ECC recovered */
240 			ret = 0;
241 		}
242 		if (ret || retlen != c->wbuf_ofs - start) {
243 			printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");
244 
245 			kfree(buf);
246 			buf = NULL;
247 		read_failed:
248 			first_raw = &(*first_raw)->next_phys;
249 			/* If this was the only node to be recovered, give up */
250 			if (!(*first_raw))
251 				return;
252 
253 			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
254 			start = ref_offset(*first_raw);
255 		} else {
256 			/* Read succeeded. Copy the remaining data from the wbuf */
257 			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
258 		}
259 	}
260 	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
261 	   Either 'buf' contains the data, or we find it in the wbuf */
262 
263 
264 	/* ... and get an allocation of space from a shiny new block instead */
265 	ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
266 	if (ret) {
267 		printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
268 		kfree(buf);
269 		return;
270 	}
271 	ofs = write_ofs(c);
272 
273 	if (end-start >= c->wbuf_pagesize) {
274 		/* Need to do another write immediately, but it's possible
275 		   that this is just because the wbuf itself is completely
276 		   full, and there's nothing earlier read back from the
277 		   flash. Hence 'buf' isn't necessarily what we're writing
278 		   from. */
279 		unsigned char *rewrite_buf = buf?:c->wbuf;
280 		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
281 
282 		D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
283 			  towrite, ofs));
284 
285 #ifdef BREAKMEHEADER
286 		static int breakme;
287 		if (breakme++ == 20) {
288 			printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
289 			breakme = 0;
290 			c->mtd->write(c->mtd, ofs, towrite, &retlen,
291 				      brokenbuf);
292 			ret = -EIO;
293 		} else
294 #endif
295 			ret = c->mtd->write(c->mtd, ofs, towrite, &retlen,
296 					    rewrite_buf);
297 
298 		if (ret || retlen != towrite) {
299 			/* Argh. We tried. Really we did. */
300 			printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
301 			kfree(buf);
302 
303 			if (retlen) {
304 				struct jffs2_raw_node_ref *raw2;
305 
306 				raw2 = jffs2_alloc_raw_node_ref();
307 				if (!raw2)
308 					return;
309 
310 				raw2->flash_offset = ofs | REF_OBSOLETE;
311 
312 				jffs2_add_physical_node_ref(c, raw2, ref_totlen(c, jeb, *first_raw), NULL);
313 			}
314 			return;
315 		}
316 		printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);
317 
318 		c->wbuf_len = (end - start) - towrite;
319 		c->wbuf_ofs = ofs + towrite;
320 		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
321 		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
322 		kfree(buf);
323 	} else {
324 		/* OK, now we're left with the dregs in whichever buffer we're using */
325 		if (buf) {
326 			memcpy(c->wbuf, buf, end-start);
327 			kfree(buf);
328 		} else {
329 			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
330 		}
331 		c->wbuf_ofs = ofs;
332 		c->wbuf_len = end - start;
333 	}
334 
335 	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
336 	new_jeb = &c->blocks[ofs / c->sector_size];
337 
338 	spin_lock(&c->erase_completion_lock);
339 	if (new_jeb->first_node) {
340 		/* Odd, but possible with ST flash later maybe */
341 		new_jeb->last_node->next_phys = *first_raw;
342 	} else {
343 		new_jeb->first_node = *first_raw;
344 	}
345 
346 	raw = first_raw;
347 	while (*raw) {
348 		uint32_t rawlen = ref_totlen(c, jeb, *raw);
349 
350 		D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
351 			  rawlen, ref_offset(*raw), ref_flags(*raw), ofs));
352 
353 		if (ref_obsolete(*raw)) {
354 			/* Shouldn't really happen much */
355 			new_jeb->dirty_size += rawlen;
356 			new_jeb->free_size -= rawlen;
357 			c->dirty_size += rawlen;
358 		} else {
359 			new_jeb->used_size += rawlen;
360 			new_jeb->free_size -= rawlen;
361 			jeb->dirty_size += rawlen;
362 			jeb->used_size  -= rawlen;
363 			c->dirty_size += rawlen;
364 		}
365 		c->free_size -= rawlen;
366 		(*raw)->flash_offset = ofs | ref_flags(*raw);
367 		ofs += rawlen;
368 		new_jeb->last_node = *raw;
369 
370 		raw = &(*raw)->next_phys;
371 	}
372 
373 	/* Fix up the original jeb now it's on the bad_list */
374 	*first_raw = NULL;
375 	if (first_raw == &jeb->first_node) {
376 		jeb->last_node = NULL;
377 		D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
378 		list_del(&jeb->list);
379 		list_add(&jeb->list, &c->erase_pending_list);
380 		c->nr_erasing_blocks++;
381 		jffs2_erase_pending_trigger(c);
382 	}
383 	else
384 		jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys);
385 
386 	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
387         jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
388 
389 	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
390         jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
391 
392 	spin_unlock(&c->erase_completion_lock);
393 
394 	D1(printk(KERN_DEBUG "wbuf recovery completed OK\n"));
395 }
396 
397 /* Meaning of pad argument:
398    0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
399    1: Pad, do not adjust nextblock free_size
400    2: Pad, adjust nextblock free_size
401 */
402 #define NOPAD		0
403 #define PAD_NOACCOUNT	1
404 #define PAD_ACCOUNTING	2
405 
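/* Flush the write buffer to flash: optionally pad the final partial page
   with a padding node, write out the full page, and on success account for
   the padded bytes as wasted space and reset the buffer. The caller must
   hold alloc_sem; wbuf_sem is held for writing by all callers in this file. */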
406 static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
407 {
408 	int ret;
409 	size_t retlen;
410 
411 	/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
412 	   del_timer() the timer we never initialised. */
413 	if (!jffs2_is_writebuffered(c))
414 		return 0;
415 
416 	if (!down_trylock(&c->alloc_sem)) {
417 		up(&c->alloc_sem);
418 		printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
419 		BUG();
420 	}
421 
422 	if (!c->wbuf_len)	/* already checked c->wbuf above */
423 		return 0;
424 
425 	/* Claim the remaining space on the page.
426 	   This happens if we change to a new block,
427 	   or if fsync forces us to flush the write buffer.
428 	   If we have just switched to the next page, we will not have
429 	   enough remaining space for this.
430 	*/
431 	if (pad ) {
432 		c->wbuf_len = PAD(c->wbuf_len);
433 
434 		/* Pad with JFFS2_DIRTY_BITMASK initially.  this helps out ECC'd NOR
435 		   with 8 byte page size */
436 		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
437 
438 		if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
439 			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
440 			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
441 			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
442 			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
443 			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
444 		}
445 	}
446 	/* else jffs2_flash_writev has actually filled in the rest of the
447 	   buffer for us, and will deal with the node refs etc. later. */
448 
449 #ifdef BREAKME
450 	static int breakme;
451 	if (breakme++ == 20) {
452 		printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
453 		breakme = 0;
454 		c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
455 			      brokenbuf);
456 		ret = -EIO;
457 	} else
458 #endif
459 
460 		ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);
461 
462 	if (ret || retlen != c->wbuf_pagesize) {
463 		if (ret)
464 			printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n",ret);
465 		else {
466 			printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
467 				retlen, c->wbuf_pagesize);
468 			ret = -EIO;
469 		}
470 
471 		jffs2_wbuf_recover(c);
472 
473 		return ret;
474 	}
475 
476 	/* Adjust free size of the block if we padded. */
477 	if (pad) {
478 		struct jffs2_eraseblock *jeb;
479 		struct jffs2_raw_node_ref *ref;
480 		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
481 
482 		jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
483 
484 		D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
485 			  (jeb==c->nextblock)?"next":"", jeb->offset));
486 
487 		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
488 		   padded. If there is less free space in the block than that,
489 		   something screwed up */
490 		if (jeb->free_size < waste) {
491 			printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
492 			       c->wbuf_ofs, c->wbuf_len, waste);
493 			printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
494 			       jeb->offset, jeb->free_size);
495 			BUG();
496 		}
497 		ref = jffs2_alloc_raw_node_ref();
498 		if (!ref)
499 			return -ENOMEM;
500 		ref->flash_offset = c->wbuf_ofs + c->wbuf_len;
501 		ref->flash_offset |= REF_OBSOLETE;
502 
503 		spin_lock(&c->erase_completion_lock);
504 
505 		jffs2_link_node_ref(c, jeb, ref, waste, NULL);
506 		/* FIXME: that made it count as dirty. Convert to wasted */
507 		jeb->dirty_size -= waste;
508 		c->dirty_size -= waste;
509 		jeb->wasted_size += waste;
510 		c->wasted_size += waste;
511 	} else
512 		spin_lock(&c->erase_completion_lock);
513 
514 	/* Stick any now-obsoleted blocks on the erase_pending_list */
515 	jffs2_refile_wbuf_blocks(c);
516 	jffs2_clear_wbuf_ino_list(c);
517 	spin_unlock(&c->erase_completion_lock);
518 
519 	memset(c->wbuf,0xff,c->wbuf_pagesize);
520 	/* adjust write buffer offset, else we get a non-contiguous write bug */
521 	c->wbuf_ofs += c->wbuf_pagesize;
522 	c->wbuf_len = 0;
523 	return 0;
524 }
525 
526 /* Trigger garbage collection to flush the write-buffer.
527    If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
528    outstanding. If ino arg non-zero, do it only if a write for the
529    given inode is outstanding. */
530 int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
531 {
532 	uint32_t old_wbuf_ofs;
533 	uint32_t old_wbuf_len;
534 	int ret = 0;
535 
536 	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));
537 
538 	if (!c->wbuf)
539 		return 0;
540 
541 	down(&c->alloc_sem);
542 	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
543 		D1(printk(KERN_DEBUG "Ino #%u not pending in wbuf. Returning\n", ino));
544 		up(&c->alloc_sem);
545 		return 0;
546 	}
547 
548 	old_wbuf_ofs = c->wbuf_ofs;
549 	old_wbuf_len = c->wbuf_len;
550 
551 	if (c->unchecked_size) {
552 		/* GC won't make any progress for a while */
553 		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
554 		down_write(&c->wbuf_sem);
555 		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
556 		/* retry flushing wbuf in case jffs2_wbuf_recover
557 		   left some data in the wbuf */
558 		if (ret)
559 			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
560 		up_write(&c->wbuf_sem);
561 	} else while (old_wbuf_len &&
562 		      old_wbuf_ofs == c->wbuf_ofs) {
563 
564 		up(&c->alloc_sem);
565 
566 		D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));
567 
568 		ret = jffs2_garbage_collect_pass(c);
569 		if (ret) {
570 			/* GC failed. Flush it with padding instead */
571 			down(&c->alloc_sem);
572 			down_write(&c->wbuf_sem);
573 			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
574 			/* retry flushing wbuf in case jffs2_wbuf_recover
575 			   left some data in the wbuf */
576 			if (ret)
577 				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
578 			up_write(&c->wbuf_sem);
579 			break;
580 		}
581 		down(&c->alloc_sem);
582 	}
583 
584 	D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));
585 
586 	up(&c->alloc_sem);
587 	return ret;
588 }
589 
590 /* Pad write-buffer to end and write it, wasting space. */
591 int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
592 {
593 	int ret;
594 
595 	if (!c->wbuf)
596 		return 0;
597 
598 	down_write(&c->wbuf_sem);
599 	ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
600 	/* retry - maybe wbuf recover left some data in wbuf. */
601 	if (ret)
602 		ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
603 	up_write(&c->wbuf_sem);
604 
605 	return ret;
606 }
607 
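/* Copy as much of 'buf' as fits into the write buffer and return the number
   of bytes consumed. Returns 0 when the buffer is empty and the data is at
   least a full page, so the caller can write those pages straight to flash. */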
608 static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
609 			      size_t len)
610 {
611 	if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
612 		return 0;
613 
614 	if (len > (c->wbuf_pagesize - c->wbuf_len))
615 		len = c->wbuf_pagesize - c->wbuf_len;
616 	memcpy(c->wbuf + c->wbuf_len, buf, len);
617 	c->wbuf_len += (uint32_t) len;
618 	return len;
619 }
620 
621 int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
622 		       unsigned long count, loff_t to, size_t *retlen,
623 		       uint32_t ino)
624 {
625 	struct jffs2_eraseblock *jeb;
626 	size_t wbuf_retlen, donelen = 0;
627 	uint32_t outvec_to = to;
628 	int ret, invec;
629 
630 	/* If not writebuffered flash, don't bother */
631 	if (!jffs2_is_writebuffered(c))
632 		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
633 
634 	down_write(&c->wbuf_sem);
635 
636 	/* If wbuf_ofs is not initialized, set it to target address */
637 	if (c->wbuf_ofs == 0xFFFFFFFF) {
638 		c->wbuf_ofs = PAGE_DIV(to);
639 		c->wbuf_len = PAGE_MOD(to);
640 		memset(c->wbuf,0xff,c->wbuf_pagesize);
641 	}
642 
643 	/*
644 	 * Sanity checks on target address.  It's permitted to write
645 	 * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
646 	 * write at the beginning of a new erase block. Anything else,
647 	 * and you die.  New block starts at xxx000c (0-b = block
648 	 * header)
649 	 */
650 	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
651 		/* It's a write to a new block */
652 		if (c->wbuf_len) {
653 			D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx "
654 				  "causes flush of wbuf at 0x%08x\n",
655 				  (unsigned long)to, c->wbuf_ofs));
656 			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
657 			if (ret)
658 				goto outerr;
659 		}
660 		/* set pointer to new block */
661 		c->wbuf_ofs = PAGE_DIV(to);
662 		c->wbuf_len = PAGE_MOD(to);
663 	}
664 
665 	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
666 		/* We're not writing immediately after the writebuffer. Bad. */
667 		printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write "
668 		       "to %08lx\n", (unsigned long)to);
669 		if (c->wbuf_len)
670 			printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
671 			       c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
672 		BUG();
673 	}
674 
675 	/* adjust alignment offset */
676 	if (c->wbuf_len != PAGE_MOD(to)) {
677 		c->wbuf_len = PAGE_MOD(to);
678 		/* take care of alignment to next page */
679 		if (!c->wbuf_len) {
680 			c->wbuf_len = c->wbuf_pagesize;
681 			ret = __jffs2_flush_wbuf(c, NOPAD);
682 			if (ret)
683 				goto outerr;
684 		}
685 	}
686 
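	/* For each vector: top up the write buffer to a page boundary, write any
	   whole pages straight to flash, then stash the remainder in the wbuf. */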
687 	for (invec = 0; invec < count; invec++) {
688 		int vlen = invecs[invec].iov_len;
689 		uint8_t *v = invecs[invec].iov_base;
690 
691 		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
692 
693 		if (c->wbuf_len == c->wbuf_pagesize) {
694 			ret = __jffs2_flush_wbuf(c, NOPAD);
695 			if (ret)
696 				goto outerr;
697 		}
698 		vlen -= wbuf_retlen;
699 		outvec_to += wbuf_retlen;
700 		donelen += wbuf_retlen;
701 		v += wbuf_retlen;
702 
703 		if (vlen >= c->wbuf_pagesize) {
704 			ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen),
705 					    &wbuf_retlen, v);
706 			if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
707 				goto outfile;
708 
709 			vlen -= wbuf_retlen;
710 			outvec_to += wbuf_retlen;
711 			c->wbuf_ofs = outvec_to;
712 			donelen += wbuf_retlen;
713 			v += wbuf_retlen;
714 		}
715 
716 		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
717 		if (c->wbuf_len == c->wbuf_pagesize) {
718 			ret = __jffs2_flush_wbuf(c, NOPAD);
719 			if (ret)
720 				goto outerr;
721 		}
722 
723 		outvec_to += wbuf_retlen;
724 		donelen += wbuf_retlen;
725 	}
726 
727 	/*
728 	 * If there's a remainder in the wbuf and it's a non-GC write,
729 	 * remember that the wbuf affects this ino
730 	 */
731 	*retlen = donelen;
732 
733 	if (jffs2_sum_active()) {
734 		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
735 		if (res) {
736 			/* don't return with wbuf_sem still held */
737 			up_write(&c->wbuf_sem);
738 			return res;
739 		}
740 	}
738 
739 	if (c->wbuf_len && ino)
740 		jffs2_wbuf_dirties_inode(c, ino);
741 
742 	ret = 0;
743 	up_write(&c->wbuf_sem);
744 	return ret;
745 
746 outfile:
747 	/*
748 	 * At this point we have no problem and c->wbuf is empty. However,
749 	 * refile nextblock to avoid writing to the same address again.
750 	 */
751 
752 	spin_lock(&c->erase_completion_lock);
753 
754 	jeb = &c->blocks[outvec_to / c->sector_size];
755 	jffs2_block_refile(c, jeb, REFILE_ANYWAY);
756 
757 	spin_unlock(&c->erase_completion_lock);
758 
759 outerr:
760 	*retlen = 0;
761 	up_write(&c->wbuf_sem);
762 	return ret;
763 }
764 
765 /*
766  *	This is the entry point for flash writes.
767  *	If the flash is write-buffered, build a kvec and write it via jffs2_flash_writev().
768  */
769 int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf)
770 {
771 	struct kvec vecs[1];
772 
773 	if (!jffs2_is_writebuffered(c))
774 		return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
775 
776 	vecs[0].iov_base = (unsigned char *) buf;
777 	vecs[0].iov_len = len;
778 	return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
779 }
780 
781 /*
782 	Handle readback from the write buffer and ECC failure return
783 */
784 int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
785 {
786 	loff_t	orbf = 0, owbf = 0, lwbf = 0;
787 	int	ret;
788 
789 	if (!jffs2_is_writebuffered(c))
790 		return c->mtd->read(c->mtd, ofs, len, retlen, buf);
791 
792 	/* Read flash */
793 	down_read(&c->wbuf_sem);
794 	ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);
795 
796 	if ( (ret == -EBADMSG) && (*retlen == len) ) {
797 		printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
798 		       len, ofs);
799 		/*
800 		 * We have the raw data without ECC correction in the buffer; maybe
801 		 * we are lucky and all or part of the data is correct. We check the
802 		 * node, and if the data is corrupted the node check will sort it out.
803 		 * We keep this block: it will fail on a later write or erase and then
804 		 * we mark it bad. Or should we do that now? But we should give it a
805 		 * chance. Maybe we had a system crash or power loss before the ECC
806 		 * write or an erase was completed.
807 		 * So we return success. :)
808 		 */
809 	 	ret = 0;
810 	}
811 
812 	/* if no write buffer is available or the write buffer is empty, return */
813 	if (!c->wbuf_pagesize || !c->wbuf_len)
814 		goto exit;
815 
816 	/* if we read in a different block, return */
817 	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
818 		goto exit;
819 
820 	if (ofs >= c->wbuf_ofs) {
821 		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
822 		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
823 			goto exit;
824 		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
825 		if (lwbf > len)
826 			lwbf = len;
827 	} else {
828 		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
829 		if (orbf > len)			/* does the read end before the write buffer starts? */
830 			goto exit;
831 		lwbf = len - orbf; 		/* number of bytes to copy */
832 		if (lwbf > c->wbuf_len)
833 			lwbf = c->wbuf_len;
834 	}
835 	if (lwbf > 0)
836 		memcpy(buf+orbf,c->wbuf+owbf,lwbf);
837 
838 exit:
839 	up_read(&c->wbuf_sem);
840 	return ret;
841 }
842 
843 /*
844  *	Check if the out-of-band (OOB) area is empty
845  */
846 int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode)
847 {
848 	unsigned char *buf;
849 	int 	ret = 0;
850 	int	i,len,page;
851 	size_t  retlen;
852 	int	oob_size;
853 
854 	/* allocate a buffer for all oob data in this sector */
855 	oob_size = c->mtd->oobsize;
856 	len = 4 * oob_size;
857 	buf = kmalloc(len, GFP_KERNEL);
858 	if (!buf) {
859 		printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n");
860 		return -ENOMEM;
861 	}
862 	/*
863 	 * If mode == 0, we scan for a completely empty OOB area; otherwise we
864 	 * have to allow for the cleanmarker in the first page of the block.
865 	 */
866 	ret = jffs2_flash_read_oob(c, jeb->offset, len , &retlen, buf);
867 	if (ret) {
868 		D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
869 		goto out;
870 	}
871 
872 	if (retlen < len) {
873 		D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read "
874 			  "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset));
875 		ret = -EIO;
876 		goto out;
877 	}
878 
879 	/* Special check for first page */
880 	for(i = 0; i < oob_size ; i++) {
881 		/* Yeah, we know about the cleanmarker. */
882 		if (mode && i >= c->fsdata_pos &&
883 		    i < c->fsdata_pos + c->fsdata_len)
884 			continue;
885 
886 		if (buf[i] != 0xFF) {
887 			D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n",
888 				  buf[i], i, jeb->offset));
889 			ret = 1;
890 			goto out;
891 		}
892 	}
893 
894 	/* we know, we are aligned :) */
895 	for (page = oob_size; page < len; page += sizeof(long)) {
896 		unsigned long dat = *(unsigned long *)(&buf[page]);
897 		if(dat != -1) {
898 			ret = 1;
899 			goto out;
900 		}
901 	}
902 
903 out:
904 	kfree(buf);
905 
906 	return ret;
907 }
908 
909 /*
910  *	Scan for a valid cleanmarker and for bad blocks.
911  *	For virtual blocks (concatenated physical blocks) check the cleanmarker
912  *	only in the first page of the first physical block, but scan for bad
913  *	blocks in all physical blocks.
914  */
915 int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
916 {
917 	struct jffs2_unknown_node n;
918 	unsigned char buf[2 * NAND_MAX_OOBSIZE];
919 	unsigned char *p;
920 	int ret, i, cnt, retval = 0;
921 	size_t retlen, offset;
922 	int oob_size;
923 
924 	offset = jeb->offset;
925 	oob_size = c->mtd->oobsize;
926 
927 	/* Loop through the physical blocks */
928 	for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) {
929 		/* Check first if the block is bad. */
930 		if (c->mtd->block_isbad (c->mtd, offset)) {
931 			D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset));
932 			return 2;
933 		}
934 		/*
935 		 * We read OOB data from pages 0 and 1 of the block.
936 		 * Page 0 contains the cleanmarker and bad-block info;
937 		 * page 1 contains the failure count of this block.
938 		 */
939 		ret = c->mtd->read_oob (c->mtd, offset, oob_size << 1, &retlen, buf);
940 
941 		if (ret) {
942 			D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
943 			return ret;
944 		}
945 		if (retlen < (oob_size << 1)) {
946 			D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB return short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size << 1, jeb->offset));
947 			return -EIO;
948 		}
949 
950 		/* Check cleanmarker only on the first physical block */
951 		if (!cnt) {
952 			n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK);
953 			n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER);
954 			n.totlen = cpu_to_je32 (8);
955 			p = (unsigned char *) &n;
956 
957 			for (i = 0; i < c->fsdata_len; i++) {
958 				if (buf[c->fsdata_pos + i] != p[i]) {
959 					retval = 1;
960 				}
961 			}
962 			D1(if (retval == 1) {
963 				printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset);
964 				printk(KERN_WARNING "OOB at %08x was ", offset);
965 				for (i=0; i < oob_size; i++) {
966 					printk("%02x ", buf[i]);
967 				}
968 				printk("\n");
969 			})
970 		}
971 		offset += c->mtd->erasesize;
972 	}
973 	return retval;
974 }
975 
976 int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
977 {
978 	struct 	jffs2_unknown_node n;
979 	int 	ret;
980 	size_t 	retlen;
981 
982 	n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
983 	n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
984 	n.totlen = cpu_to_je32(8);
985 
986 	ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n);
987 
988 	if (ret) {
989 		D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
990 		return ret;
991 	}
992 	if (retlen != c->fsdata_len) {
993 		D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Short write for block at %08x: %zd not %d\n", jeb->offset, retlen, c->fsdata_len));
994 		return ret;
995 	}
996 	return 0;
997 }
998 
999 /*
1000  * On NAND we try to mark this block bad. Once the block has suffered
1001  * MAX_ERASE_FAILURES erase failures, we finally mark it bad.
1002  * Don't care about failures. This block remains on the erase-pending
1003  * or badblock list as long as nobody manipulates the flash with
1004  * a bootloader or something like that.
1005  */
1006 
1007 int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
1008 {
1009 	int 	ret;
1010 
1011 	/* if the failure count is still below the limit, don't mark the block bad yet */
1012 	if( ++jeb->bad_count < MAX_ERASE_FAILURES)
1013 		return 0;
1014 
1015 	if (!c->mtd->block_markbad)
1016 		return 1; // What else can we do?
1017 
1018 	D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
1019 	ret = c->mtd->block_markbad(c->mtd, bad_offset);
1020 
1021 	if (ret) {
1022 		D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
1023 		return ret;
1024 	}
1025 	return 1;
1026 }
1027 
1028 #define NAND_JFFS2_OOB16_FSDALEN	8
1029 
1030 static struct nand_oobinfo jffs2_oobinfo_docecc = {
1031 	.useecc = MTD_NANDECC_PLACE,
1032 	.eccbytes = 6,
1033 	.eccpos = {0,1,2,3,4,5}
1034 };
1035 
1036 
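/* Decide where in the OOB area the JFFS2 cleanmarker (fsdata) lives: prefer
   the free bytes reported by ECC autoplacement, falling back to a fixed
   layout for DiskOnChip-style hardware ECC. */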
1037 static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
1038 {
1039 	struct nand_oobinfo *oinfo = &c->mtd->oobinfo;
1040 
1041 	/* Do this only, if we have an oob buffer */
1042 	if (!c->mtd->oobsize)
1043 		return 0;
1044 
1045 	/* Cleanmarker is out-of-band, so inline size zero */
1046 	c->cleanmarker_size = 0;
1047 
1048 	/* Should we use autoplacement ? */
1049 	if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) {
1050 		D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
1051 		/* Get the position of the free bytes */
1052 		if (!oinfo->oobfree[0][1]) {
1053 			printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n");
1054 			return -ENOSPC;
1055 		}
1056 		c->fsdata_pos = oinfo->oobfree[0][0];
1057 		c->fsdata_len = oinfo->oobfree[0][1];
1058 		if (c->fsdata_len > 8)
1059 			c->fsdata_len = 8;
1060 	} else {
1061 		/* This is just a legacy fallback and should go away soon */
1062 		switch(c->mtd->ecctype) {
1063 		case MTD_ECC_RS_DiskOnChip:
1064 			printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n");
1065 			c->oobinfo = &jffs2_oobinfo_docecc;
1066 			c->fsdata_pos = 6;
1067 			c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
1068 			c->badblock_pos = 15;
1069 			break;
1070 
1071 		default:
1072 			D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacement info found\n"));
1073 			return -EINVAL;
1074 		}
1075 	}
1076 	return 0;
1077 }
1078 
1079 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1080 {
1081 	int res;
1082 
1083 	/* Initialise write buffer */
1084 	init_rwsem(&c->wbuf_sem);
1085 	c->wbuf_pagesize = c->mtd->writesize;
1086 	c->wbuf_ofs = 0xFFFFFFFF;
1087 
1088 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1089 	if (!c->wbuf)
1090 		return -ENOMEM;
1091 
1092 	res = jffs2_nand_set_oobinfo(c);
1093 
1094 #ifdef BREAKME
1095 	if (!brokenbuf)
1096 		brokenbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1097 	if (!brokenbuf) {
1098 		kfree(c->wbuf);
1099 		return -ENOMEM;
1100 	}
1101 	memset(brokenbuf, 0xdb, c->wbuf_pagesize);
1102 #endif
1103 	return res;
1104 }
1105 
1106 void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1107 {
1108 	kfree(c->wbuf);
1109 }
1110 
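/* DataFlash setup: the write buffer spans a whole physical erase block, and
   several erase blocks are concatenated into one JFFS2 'sector' (see the
   sizing notes below). */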
1111 int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1112 	c->cleanmarker_size = 0;		/* No cleanmarkers needed */
1113 
1114 	/* Initialize write buffer */
1115 	init_rwsem(&c->wbuf_sem);
1116 
1117 
1118 	c->wbuf_pagesize =  c->mtd->erasesize;
1119 
1120 	/* Find a suitable c->sector_size
1121 	 * - Not too many sectors
1122 	 * - Sectors have to be at least 4 KiB + some bytes
1123 	 * - All known dataflashes have erase sizes of 528 or 1056 bytes
1124 	 * - We take at least 8 eraseblocks and want at least 8 KiB in size
1125 	 * - The concatenation should be a power of 2
1126 	 */
1127 
1128 	c->sector_size = 8 * c->mtd->erasesize;
1129 
1130 	while (c->sector_size < 8192) {
1131 		c->sector_size *= 2;
1132 	}
1133 
1134 	/* It may be necessary to adjust the flash size */
1135 	c->flash_size = c->mtd->size;
1136 
1137 	if ((c->flash_size % c->sector_size) != 0) {
1138 		c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
1139 		printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size / 1024);
1140 	}
1141 
1142 	c->wbuf_ofs = 0xFFFFFFFF;
1143 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1144 	if (!c->wbuf)
1145 		return -ENOMEM;
1146 
1147 	printk(KERN_INFO "JFFS2 write-buffering enabled: buffer size %d, virtual erase size %d\n", c->wbuf_pagesize, c->sector_size);
1148 
1149 	return 0;
1150 }
1151 
1152 void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1153 	kfree(c->wbuf);
1154 }
1155 
1156 int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1157 	/* Cleanmarker currently occupies whole programming regions,
1158 	 * either one or two for 8-byte STMicro flashes. */
1159 	c->cleanmarker_size = max(16u, c->mtd->writesize);
1160 
1161 	/* Initialize write buffer */
1162 	init_rwsem(&c->wbuf_sem);
1163 	c->wbuf_pagesize = c->mtd->writesize;
1164 	c->wbuf_ofs = 0xFFFFFFFF;
1165 
1166 	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1167 	if (!c->wbuf)
1168 		return -ENOMEM;
1169 
1170 	return 0;
1171 }
1172 
1173 void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1174 	kfree(c->wbuf);
1175 }
1176