11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds * JFFS2 -- Journalling Flash File System, Version 2.
31da177e4SLinus Torvalds *
4c00c310eSDavid Woodhouse * Copyright © 2001-2007 Red Hat, Inc.
5c00c310eSDavid Woodhouse * Copyright © 2004 Thomas Gleixner <tglx@linutronix.de>
61da177e4SLinus Torvalds *
71da177e4SLinus Torvalds * Created by David Woodhouse <dwmw2@infradead.org>
 * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
91da177e4SLinus Torvalds *
101da177e4SLinus Torvalds * For licensing information, see the file 'LICENCE' in this directory.
111da177e4SLinus Torvalds *
121da177e4SLinus Torvalds */
131da177e4SLinus Torvalds
145a528957SJoe Perches #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
155a528957SJoe Perches
161da177e4SLinus Torvalds #include <linux/kernel.h>
171da177e4SLinus Torvalds #include <linux/slab.h>
181da177e4SLinus Torvalds #include <linux/mtd/mtd.h>
191da177e4SLinus Torvalds #include <linux/crc32.h>
20d4092d76SBoris Brezillon #include <linux/mtd/rawnand.h>
214e57b681STim Schmielau #include <linux/jiffies.h>
22914e2637SAl Viro #include <linux/sched.h>
238bdc81c5SArtem Bityutskiy #include <linux/writeback.h>
244e57b681STim Schmielau
251da177e4SLinus Torvalds #include "nodelist.h"
261da177e4SLinus Torvalds
271da177e4SLinus Torvalds /* For testing write failures */
281da177e4SLinus Torvalds #undef BREAKME
291da177e4SLinus Torvalds #undef BREAKMEHEADER
301da177e4SLinus Torvalds
311da177e4SLinus Torvalds #ifdef BREAKME
321da177e4SLinus Torvalds static unsigned char *brokenbuf;
331da177e4SLinus Torvalds #endif
341da177e4SLinus Torvalds
35daba5cc4SArtem B. Bityutskiy #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
36daba5cc4SArtem B. Bityutskiy #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
37daba5cc4SArtem B. Bityutskiy
381da177e4SLinus Torvalds /* max. erase failures before we mark a block bad */
391da177e4SLinus Torvalds #define MAX_ERASE_FAILURES 2
401da177e4SLinus Torvalds
411da177e4SLinus Torvalds struct jffs2_inodirty {
421da177e4SLinus Torvalds uint32_t ino;
431da177e4SLinus Torvalds struct jffs2_inodirty *next;
441da177e4SLinus Torvalds };
451da177e4SLinus Torvalds
461da177e4SLinus Torvalds static struct jffs2_inodirty inodirty_nomem;
471da177e4SLinus Torvalds
/*
 * Return nonzero if data for the given inode is currently pending in the
 * write buffer.  With ino == 0 the question becomes "is anything at all
 * pending?".  After an inodirty allocation failure the list is the
 * inodirty_nomem sentinel and every inode is considered dirty.
 */
static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *entry;

	/* If a malloc failed, consider _everything_ dirty */
	if (c->wbuf_inodes == &inodirty_nomem)
		return 1;

	/* ino == 0: any non-GC write pending at all counts */
	if (!ino)
		return c->wbuf_inodes != NULL;

	/* Otherwise scan the list for this particular inode number */
	for (entry = c->wbuf_inodes; entry; entry = entry->next) {
		if (entry->ino == ino)
			return 1;
	}

	return 0;
}
681da177e4SLinus Torvalds
jffs2_clear_wbuf_ino_list(struct jffs2_sb_info * c)691da177e4SLinus Torvalds static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
701da177e4SLinus Torvalds {
711da177e4SLinus Torvalds struct jffs2_inodirty *this;
721da177e4SLinus Torvalds
731da177e4SLinus Torvalds this = c->wbuf_inodes;
741da177e4SLinus Torvalds
751da177e4SLinus Torvalds if (this != &inodirty_nomem) {
761da177e4SLinus Torvalds while (this) {
771da177e4SLinus Torvalds struct jffs2_inodirty *next = this->next;
781da177e4SLinus Torvalds kfree(this);
791da177e4SLinus Torvalds this = next;
801da177e4SLinus Torvalds }
811da177e4SLinus Torvalds }
821da177e4SLinus Torvalds c->wbuf_inodes = NULL;
831da177e4SLinus Torvalds }
841da177e4SLinus Torvalds
/*
 * Record that inode 'ino' now has data sitting in the write buffer and
 * schedule the delayed write-buffer flush.  If we cannot allocate a list
 * entry, fall back to the inodirty_nomem sentinel, which makes
 * jffs2_wbuf_pending_for_ino() treat every inode as dirty.
 */
static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *entry;

	/* Schedule delayed write-buffer write-out */
	jffs2_dirty_trigger(c);

	/* Already recorded (or everything is considered dirty)? */
	if (jffs2_wbuf_pending_for_ino(c, ino))
		return;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
		jffs2_clear_wbuf_ino_list(c);
		c->wbuf_inodes = &inodirty_nomem;
		return;
	}

	/* Push the new entry on the front of the list */
	entry->ino = ino;
	entry->next = c->wbuf_inodes;
	c->wbuf_inodes = entry;
}
1071da177e4SLinus Torvalds
jffs2_refile_wbuf_blocks(struct jffs2_sb_info * c)1081da177e4SLinus Torvalds static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
1091da177e4SLinus Torvalds {
1101da177e4SLinus Torvalds struct list_head *this, *next;
1111da177e4SLinus Torvalds static int n;
1121da177e4SLinus Torvalds
1131da177e4SLinus Torvalds if (list_empty(&c->erasable_pending_wbuf_list))
1141da177e4SLinus Torvalds return;
1151da177e4SLinus Torvalds
1161da177e4SLinus Torvalds list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
1171da177e4SLinus Torvalds struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
1181da177e4SLinus Torvalds
1199c261b33SJoe Perches jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
1209c261b33SJoe Perches jeb->offset);
1211da177e4SLinus Torvalds list_del(this);
1221da177e4SLinus Torvalds if ((jiffies + (n++)) & 127) {
1231da177e4SLinus Torvalds /* Most of the time, we just erase it immediately. Otherwise we
1241da177e4SLinus Torvalds spend ages scanning it on mount, etc. */
1259c261b33SJoe Perches jffs2_dbg(1, "...and adding to erase_pending_list\n");
1261da177e4SLinus Torvalds list_add_tail(&jeb->list, &c->erase_pending_list);
1271da177e4SLinus Torvalds c->nr_erasing_blocks++;
128ae3b6ba0SDavid Woodhouse jffs2_garbage_collect_trigger(c);
1291da177e4SLinus Torvalds } else {
1301da177e4SLinus Torvalds /* Sometimes, however, we leave it elsewhere so it doesn't get
1311da177e4SLinus Torvalds immediately reused, and we spread the load a bit. */
1329c261b33SJoe Perches jffs2_dbg(1, "...and adding to erasable_list\n");
1331da177e4SLinus Torvalds list_add_tail(&jeb->list, &c->erasable_list);
1341da177e4SLinus Torvalds }
1351da177e4SLinus Torvalds }
1361da177e4SLinus Torvalds }
1371da177e4SLinus Torvalds
1387f716cf3SEstelle Hammache #define REFILE_NOTEMPTY 0
1397f716cf3SEstelle Hammache #define REFILE_ANYWAY 1
1407f716cf3SEstelle Hammache
/*
 * Refile an eraseblock on which a write has failed.
 *
 * Called with c->erase_completion_lock held (see jffs2_wbuf_recover).
 * The block is detached from wherever it currently lives (it may be
 * c->nextblock) and filed either on bad_used_list (if it already holds
 * nodes) or on erase_pending_list (if empty).  The remaining free space
 * is then swallowed by a single obsolete node ref and accounted as
 * 'wasted' rather than 'dirty', keeping the space accounting coherent.
 *
 * @allow_empty: REFILE_ANYWAY if the block may legitimately be empty;
 *               REFILE_NOTEMPTY if an empty block here is a logic error
 *               and triggers BUG().
 */
static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
	jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);

	/* File the existing block on the bad_used_list.... */
	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else /* Not sure this should ever happen... need more coffee */
		list_del(&jeb->list);
	if (jeb->first_node) {
		/* Block already holds live nodes: keep it on bad_used_list */
		jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		BUG_ON(allow_empty == REFILE_NOTEMPTY);
		/* It has to have had some nodes or we couldn't be here */
		jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	/* Swallow the block's remaining free space with one obsolete ref
	   and convert it from dirty to wasted in the accounting.  If the
	   ref prealloc fails we silently skip this — best effort only. */
	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
		uint32_t oldfree = jeb->free_size;

		jffs2_link_node_ref(c, jeb,
				    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
				    oldfree, NULL);
		/* convert to wasted */
		c->wasted_size += oldfree;
		jeb->wasted_size += oldfree;
		c->dirty_size -= oldfree;
		jeb->dirty_size -= oldfree;
	}

	jffs2_dbg_dump_block_lists_nolock(c);
	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}
1811da177e4SLinus Torvalds
/*
 * Locate the in-core pointer that references the raw node 'raw' for
 * inode 'f', so the caller can redirect it at a freshly written copy.
 * 'node' points at the node's on-medium header (as read back into a
 * buffer) and is used only to determine the node type / offset.
 *
 * Returns the address of the ->raw pointer to adjust — in f->metadata,
 * in the matching full_dnode hanging off the fragtree, or in a
 * full_dirent on f->dents — or NULL for node types that carry no
 * in-core back-pointer.  BUGs if an INODE/DIRENT node cannot be found
 * in the in-core structures, since the caller guarantees it is present.
 */
static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
							    struct jffs2_inode_info *f,
							    struct jffs2_raw_node_ref *raw,
							    union jffs2_node_union *node)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dirent *fd;

	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));

	/* 0x1985 is presumably the JFFS2 magic bitmask; zero appears for
	   padding — anything else means we are looking at garbage. */
	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
	       je16_to_cpu(node->u.magic) != 0);

	switch (je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE:
		/* The inode's metadata node may itself be the one moving */
		if (f->metadata && f->metadata->raw == raw) {
			dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
			return &f->metadata->raw;
		}
		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
		BUG_ON(!frag);
		/* Find a frag which refers to the full_dnode we want to modify */
		while (!frag->node || frag->node->raw != raw) {
			frag = frag_next(frag);
			BUG_ON(!frag);
		}
		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
		return &frag->node->raw;

	case JFFS2_NODETYPE_DIRENT:
		/* Scan the in-core directory entries for the matching ref */
		for (fd = f->dents; fd; fd = fd->next) {
			if (fd->raw == raw) {
				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
				return &fd->raw;
			}
		}
		BUG();

	default:
		/* Other node types (e.g. padding) have no in-core pointer */
		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
			    je16_to_cpu(node->u.nodetype));
		break;
	}
	return NULL;
}
2289bfeb691SDavid Woodhouse
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
/*
 * Read back the page just written at 'ofs' and compare the flash
 * contents against 'buf' (c->wbuf_pagesize bytes).
 *
 * Returns 0 when the read-back data matches, a negative errno on a hard
 * read failure, short read, or compare mismatch.  ECC results -EUCLEAN
 * (corrected) and -EBADMSG (correction failed) are tolerated as long as
 * the byte compare still succeeds; they are only reported if it fails.
 */
static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
			      uint32_t ofs)
{
	int ret;
	size_t retlen;
	char *eccstr;

	ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
	if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
		/* NOTE(review): message reports c->wbuf_ofs although the read
		   was issued at 'ofs' — identical for the usual caller, but
		   confirm which offset is intended here. */
		pr_warn("%s(): Read back of page at %08x failed: %d\n",
			__func__, c->wbuf_ofs, ret);
		return ret;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n",
			__func__, ofs, retlen, c->wbuf_pagesize);
		return -EIO;
	}
	if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
		return 0;

	/* Mismatch: classify what the ECC layer said before dumping both
	   the data we wrote and what came back. */
	if (ret == -EUCLEAN)
		eccstr = "corrected";
	else if (ret == -EBADMSG)
		eccstr = "correction failed";
	else
		eccstr = "OK or unused";

	pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n",
		eccstr, c->wbuf_ofs);
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf, c->wbuf_pagesize, 0);

	pr_warn("Read back:\n");
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf_verify, c->wbuf_pagesize, 0);

	return -EIO;
}
#else
/* Verification compiled out: writes are assumed good */
#define jffs2_verify_write(c,b,o) (0)
#endif
271a6bc432eSDavid Woodhouse
/* Recover from failure to write wbuf. Recover the nodes up to the
 * wbuf, not the one which we were starting to try to write.
 *
 * Flow: refile the failing eraseblock; locate the first non-obsolete
 * node overlapping the write buffer; re-read any data that was already
 * partially committed to flash (or take it straight from the wbuf);
 * reserve space in a fresh block; rewrite whole pages there; then walk
 * the affected raw_node_refs, relinking each into the new block and
 * patching the in-core ->raw pointers.  Holds alloc_sem per the locking
 * comment below; takes erase_completion_lock itself where needed. */

static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *jeb, *new_jeb;
	struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
	size_t retlen;
	int ret;
	int nr_refile = 0;
	unsigned char *buf;
	uint32_t start, end, ofs, len;

	/* The block containing the failed write is the one the wbuf points at */
	jeb = &c->blocks[c->wbuf_ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	if (c->wbuf_ofs % c->mtd->erasesize)
		jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
	else
		jffs2_block_refile(c, jeb, REFILE_ANYWAY);
	spin_unlock(&c->erase_completion_lock);

	/* jffs2_block_refile() just appended an obsolete ref covering the
	   block's free space, so last_node must be obsolete here */
	BUG_ON(!ref_obsolete(jeb->last_node));

	/* Find the first node to be recovered, by skipping over every
	   node which ends before the wbuf starts, or which is obsolete. */
	for (next = raw = jeb->first_node; next; raw = next) {
		next = ref_next(raw);

		if (ref_obsolete(raw) ||
		    (next && ref_offset(next) <= c->wbuf_ofs)) {
			dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
				    ref_offset(raw), ref_flags(raw),
				    (ref_offset(raw) + ref_totlen(c, jeb, raw)),
				    c->wbuf_ofs);
			continue;
		}
		dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
			    ref_offset(raw), ref_flags(raw),
			    (ref_offset(raw) + ref_totlen(c, jeb, raw)));

		first_raw = raw;
		break;
	}

	if (!first_raw) {
		/* All nodes were obsolete. Nothing to recover. */
		jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
		c->wbuf_len = 0;
		return;
	}

	/* [start, end) spans everything from the first node to recover up
	   to the obsolete free-space ref appended by jffs2_block_refile() */
	start = ref_offset(first_raw);
	end = ref_offset(jeb->last_node);
	nr_refile = 1;

	/* Count the number of refs which need to be copied */
	while ((raw = ref_next(raw)) != jeb->last_node)
		nr_refile++;

	dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
		    start, end, end - start, nr_refile);

	buf = NULL;
	if (start < c->wbuf_ofs) {
		/* First affected node was already partially written.
		 * Attempt to reread the old data into our buffer. */

		buf = kmalloc(end - start, GFP_KERNEL);
		if (!buf) {
			pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n");

			goto read_failed;
		}

		/* Do the read... */
		ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen,
			       buf);

		/* ECC recovered ? A complete read with -EUCLEAN/-EBADMSG
		   still returned data, so treat it as success. */
		if ((ret == -EUCLEAN || ret == -EBADMSG) &&
		    (retlen == c->wbuf_ofs - start))
			ret = 0;

		if (ret || retlen != c->wbuf_ofs - start) {
			pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n");

			kfree(buf);
			buf = NULL;
		read_failed:
			/* Drop the partially-written first node (and any
			   obsolete ones after it); recover only nodes that
			   live entirely within the wbuf. */
			first_raw = ref_next(first_raw);
			nr_refile--;
			while (first_raw && ref_obsolete(first_raw)) {
				first_raw = ref_next(first_raw);
				nr_refile--;
			}

			/* If this was the only node to be recovered, give up */
			if (!first_raw) {
				c->wbuf_len = 0;
				return;
			}

			/* It wasn't. Go on and try to recover nodes complete in the wbuf */
			start = ref_offset(first_raw);
			dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
				    start, end, end - start, nr_refile);

		} else {
			/* Read succeeded. Copy the remaining data from the wbuf */
			memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
		}
	}
	/* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
	   Either 'buf' contains the data, or we find it in the wbuf */

	/* ... and get an allocation of space from a shiny new block instead */
	ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret) {
		pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	/* The summary is not recovered, so it must be disabled for this erase block */
	jffs2_sum_disable_collecting(c->summary);

	ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
	if (ret) {
		pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
		kfree(buf);
		return;
	}

	ofs = write_ofs(c);

	if (end-start >= c->wbuf_pagesize) {
		/* Need to do another write immediately, but it's possible
		   that this is just because the wbuf itself is completely
		   full, and there's nothing earlier read back from the
		   flash. Hence 'buf' isn't necessarily what we're writing
		   from. */
		unsigned char *rewrite_buf = buf?:c->wbuf;
		uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);

		jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
			  towrite, ofs);

#ifdef BREAKMEHEADER
		/* Test hook: periodically fake a write failure */
		static int breakme;
		if (breakme++ == 20) {
			pr_notice("Faking write error at 0x%08x\n", ofs);
			breakme = 0;
			mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
			ret = -EIO;
		} else
#endif
			ret = mtd_write(c->mtd, ofs, towrite, &retlen,
					rewrite_buf);

		if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
			/* Argh. We tried. Really we did. */
			pr_crit("Recovery of wbuf failed due to a second write error\n");
			kfree(buf);

			/* If anything reached the flash, mark that span obsolete */
			if (retlen)
				jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);

			return;
		}
		pr_notice("Recovery of wbuf succeeded to %08x\n", ofs);

		/* Any sub-page remainder goes back into the (now moved) wbuf */
		c->wbuf_len = (end - start) - towrite;
		c->wbuf_ofs = ofs + towrite;
		memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
		/* Don't muck about with c->wbuf_inodes. False positives are harmless. */
	} else {
		/* OK, now we're left with the dregs in whichever buffer we're using */
		if (buf) {
			memcpy(c->wbuf, buf, end-start);
		} else {
			memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
		}
		c->wbuf_ofs = ofs;
		c->wbuf_len = end - start;
	}

	/* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
	new_jeb = &c->blocks[ofs / c->sector_size];

	spin_lock(&c->erase_completion_lock);
	for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
		uint32_t rawlen = ref_totlen(c, jeb, raw);
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref *new_ref;
		struct jffs2_raw_node_ref **adjust_ref = NULL;
		struct jffs2_inode_info *f = NULL;

		jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
			  rawlen, ref_offset(raw), ref_flags(raw), ofs);

		ic = jffs2_raw_ref_to_ic(raw);

		/* Ick. This XATTR mess should be fixed shortly... */
		if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
			struct jffs2_xattr_datum *xd = (void *)ic;
			BUG_ON(xd->node != raw);
			adjust_ref = &xd->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
			/* NOTE(review): declared as struct jffs2_xattr_datum
			   though this class is an xattr *ref*; it only touches
			   the ->node member, but confirm the intended type
			   (struct jffs2_xattr_ref?). */
			struct jffs2_xattr_datum *xr = (void *)ic;
			BUG_ON(xr->node != raw);
			adjust_ref = &xr->node;
			raw->next_in_ino = NULL;
			ic = NULL;
		} else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
			struct jffs2_raw_node_ref **p = &ic->nodes;

			/* Remove the old node from the per-inode list */
			while (*p && *p != (void *)ic) {
				if (*p == raw) {
					(*p) = (raw->next_in_ino);
					raw->next_in_ino = NULL;
					break;
				}
				p = &((*p)->next_in_ino);
			}

			if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
				/* If it's an in-core inode, then we have to adjust any
				   full_dirent or full_dnode structure to point to the
				   new version instead of the old */
				f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink);
				if (IS_ERR(f)) {
					/* Should never happen; it _must_ be present */
					JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
						    ic->ino, PTR_ERR(f));
					BUG();
				}
				/* We don't lock f->sem. There's a number of ways we could
				   end up in here with it already being locked, and nobody's
				   going to modify it on us anyway because we hold the
				   alloc_sem. We're only changing one ->raw pointer too,
				   which we can get away with without upsetting readers. */
				adjust_ref = jffs2_incore_replace_raw(c, f, raw,
								      (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
			} else if (unlikely(ic->state != INO_STATE_PRESENT &&
					    ic->state != INO_STATE_CHECKEDABSENT &&
					    ic->state != INO_STATE_GC)) {
				JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
				BUG();
			}
		}

		/* Link a ref for the rewritten copy into the new block */
		new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);

		if (adjust_ref) {
			BUG_ON(*adjust_ref != raw);
			*adjust_ref = new_ref;
		}
		if (f)
			jffs2_gc_release_inode(c, f);

		if (!ref_obsolete(raw)) {
			/* The old copy becomes dirty space in the failed block */
			jeb->dirty_size += rawlen;
			jeb->used_size -= rawlen;
			c->dirty_size += rawlen;
			c->used_size -= rawlen;
			raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
			BUG_ON(raw->next_in_ino);
		}
		ofs += rawlen;
	}

	kfree(buf);

	/* Fix up the original jeb now it's on the bad_list */
	if (first_raw == jeb->first_node) {
		jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
			  jeb->offset);
		list_move(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);

	spin_unlock(&c->erase_completion_lock);

	jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
		  c->wbuf_ofs, c->wbuf_len);

}
5701da177e4SLinus Torvalds
5711da177e4SLinus Torvalds /* Meaning of pad argument:
5721da177e4SLinus Torvalds 0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
5731da177e4SLinus Torvalds 1: Pad, do not adjust nextblock free_size
5741da177e4SLinus Torvalds 2: Pad, adjust nextblock free_size
5751da177e4SLinus Torvalds */
5761da177e4SLinus Torvalds #define NOPAD 0
5771da177e4SLinus Torvalds #define PAD_NOACCOUNT 1
5781da177e4SLinus Torvalds #define PAD_ACCOUNTING 2
5791da177e4SLinus Torvalds
/*
 * Flush the contents of c->wbuf to the flash, one full wbuf page at a time.
 *
 * @c:   JFFS2 superblock info
 * @pad: NOPAD / PAD_NOACCOUNT / PAD_ACCOUNTING (see definitions above):
 *       whether to pad the partial page, and whether to charge the padding
 *       against the eraseblock's free_size accounting.
 *
 * Caller must hold c->alloc_sem (checked below with BUG()); callers in this
 * file also hold c->wbuf_sem for writing around every invocation.
 * Returns 0 on success or a negative errno; on write failure the buffer
 * contents are handed to jffs2_wbuf_recover() for relocation.
 */
static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
	struct jffs2_eraseblock *wbuf_jeb;
	int ret;
	size_t retlen;

	/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
	   del_timer() the timer we never initialised. */
	if (!jffs2_is_writebuffered(c))
		return 0;

	if (!mutex_is_locked(&c->alloc_sem)) {
		pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n");
		BUG();
	}

	if (!c->wbuf_len)	/* already checked c->wbuf above */
		return 0;

	/* Eraseblock that the buffered data belongs to; reserve node refs up
	   front so the padding ref below cannot fail mid-way. */
	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
		return -ENOMEM;

	/* claim remaining space on the page
	   this happens, if we have a change to a new block,
	   or if fsync forces us to flush the writebuffer.
	   if we have a switch to next page, we will not have
	   enough remaining space for this.
	*/
	if (pad ) {
		c->wbuf_len = PAD(c->wbuf_len);

		/* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR
		   with 8 byte page size */
		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

		/* If a full node header fits, write a PADDING node so the
		   scanner can skip the wasted tail cleanly on next mount. */
		if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
		}
	}
	/* else jffs2_flash_writev has actually filled in the rest of the
	   buffer for us, and will deal with the node refs etc. later. */

#ifdef BREAKME
	static int breakme;
	if (breakme++ == 20) {
		pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs);
		breakme = 0;
		mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
			  brokenbuf);
		ret = -EIO;
	} else
#endif

		ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
				&retlen, c->wbuf);

	/* Any failure (error, short write, or verify mismatch) funnels into
	   wfail: the buffered nodes are recovered to a different block. */
	if (ret) {
		pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret);
		goto wfail;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
			retlen, c->wbuf_pagesize);
		ret = -EIO;
		goto wfail;
	} else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
	wfail:
		jffs2_wbuf_recover(c);

		return ret;
	}

	/* Adjust free size of the block if we padded. */
	if (pad) {
		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;

		jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
			  (wbuf_jeb == c->nextblock) ? "next" : "",
			  wbuf_jeb->offset);

		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
		   padded. If there is less free space in the block than that,
		   something screwed up */
		if (wbuf_jeb->free_size < waste) {
			pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
				c->wbuf_ofs, c->wbuf_len, waste);
			pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
				wbuf_jeb->offset, wbuf_jeb->free_size);
			BUG();
		}

		spin_lock(&c->erase_completion_lock);

		/* Account the padding as an obsolete node ref... */
		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
		/* FIXME: that made it count as dirty. Convert to wasted */
		wbuf_jeb->dirty_size -= waste;
		c->dirty_size -= waste;
		wbuf_jeb->wasted_size += waste;
		c->wasted_size += waste;
	} else
		spin_lock(&c->erase_completion_lock);

	/* Stick any now-obsoleted blocks on the erase_pending_list */
	jffs2_refile_wbuf_blocks(c);
	jffs2_clear_wbuf_ino_list(c);
	spin_unlock(&c->erase_completion_lock);

	/* Reset the buffer to erased state and advance past the page just
	   written, so the next fill starts contiguously. */
	memset(c->wbuf,0xff,c->wbuf_pagesize);
	/* adjust write buffer offset, else we get a non contiguous write bug */
	c->wbuf_ofs += c->wbuf_pagesize;
	c->wbuf_len = 0;
	return 0;
}
6971da177e4SLinus Torvalds
6981da177e4SLinus Torvalds /* Trigger garbage collection to flush the write-buffer.
6991da177e4SLinus Torvalds If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
7001da177e4SLinus Torvalds outstanding. If ino arg non-zero, do it only if a write for the
7011da177e4SLinus Torvalds given inode is outstanding. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);

	/* No write buffer allocated (non-writebuffered flash): nothing to flush. */
	if (!c->wbuf)
		return 0;

	mutex_lock(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
		mutex_unlock(&c->alloc_sem);
		return 0;
	}

	/* Snapshot the wbuf position: the GC loop below terminates once a GC
	   pass has moved the buffer (flushed it) or emptied it. */
	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while */
		jffs2_dbg(1, "%s(): padding. Not finished checking\n",
			  __func__);
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry flushing wbuf in case jffs2_wbuf_recover
		   left some data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {

		/* Drop alloc_sem across the GC pass: GC takes it itself. */
		mutex_unlock(&c->alloc_sem);

		jffs2_dbg(1, "%s(): calls gc pass\n", __func__);

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			mutex_lock(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry flushing wbuf in case jffs2_wbuf_recover
			   left some data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		mutex_lock(&c->alloc_sem);
	}

	jffs2_dbg(1, "%s(): ends...\n", __func__);

	mutex_unlock(&c->alloc_sem);
	return ret;
}
7621da177e4SLinus Torvalds
7631da177e4SLinus Torvalds /* Pad write-buffer to end and write it, wasting space. */
jffs2_flush_wbuf_pad(struct jffs2_sb_info * c)7641da177e4SLinus Torvalds int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
7651da177e4SLinus Torvalds {
7661da177e4SLinus Torvalds int ret;
7671da177e4SLinus Torvalds
7688aee6ac1SDavid Woodhouse if (!c->wbuf)
7698aee6ac1SDavid Woodhouse return 0;
7708aee6ac1SDavid Woodhouse
7711da177e4SLinus Torvalds down_write(&c->wbuf_sem);
7721da177e4SLinus Torvalds ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
7737f716cf3SEstelle Hammache /* retry - maybe wbuf recover left some data in wbuf. */
7747f716cf3SEstelle Hammache if (ret)
7757f716cf3SEstelle Hammache ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
7761da177e4SLinus Torvalds up_write(&c->wbuf_sem);
7771da177e4SLinus Torvalds
7781da177e4SLinus Torvalds return ret;
7791da177e4SLinus Torvalds }
7801da177e4SLinus Torvalds
/* Copy as much of @buf as fits into the current wbuf page; returns the
 * number of bytes consumed. Takes nothing when the buffer is empty and
 * @len covers at least a full page, so the caller can write that span
 * straight to flash instead. */
static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
			      size_t len)
{
	size_t space = c->wbuf_pagesize - c->wbuf_len;

	/* Whole-page write into an empty buffer: let the caller bypass us. */
	if (len && !c->wbuf_len && len >= c->wbuf_pagesize)
		return 0;

	if (len > space)
		len = space;

	memcpy(c->wbuf + c->wbuf_len, buf, len);
	c->wbuf_len += (uint32_t) len;

	return len;
}
793dcb09328SThomas Gleixner
/*
 * Vector write through the write buffer.
 *
 * @c:      JFFS2 superblock info
 * @invecs: array of kvecs to write
 * @count:  number of kvecs
 * @to:     target flash offset; must be contiguous with the current wbuf
 *          contents or the start of a new eraseblock (BUG() otherwise)
 * @retlen: out: bytes written (0 on error)
 * @ino:    inode the data belongs to, or 0 for GC writes
 *
 * Data is staged in c->wbuf; full pages go to the flash directly via
 * mtd_write(). Returns 0 on success or a negative errno.
 *
 * Fix vs. original: the jffs2_sum_add_kvec() failure path returned while
 * still holding c->wbuf_sem (taken for writing at the top of this
 * function), which would deadlock every subsequent wbuf user. Release it
 * before returning.
 */
int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
		       unsigned long count, loff_t to, size_t *retlen,
		       uint32_t ino)
{
	struct jffs2_eraseblock *jeb;
	size_t wbuf_retlen, donelen = 0;
	uint32_t outvec_to = to;
	int ret, invec;

	/* If not writebuffered flash, don't bother */
	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);

	down_write(&c->wbuf_sem);

	/* If wbuf_ofs is not initialized, set it to target address */
	if (c->wbuf_ofs == 0xFFFFFFFF) {
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
		memset(c->wbuf,0xff,c->wbuf_pagesize);
	}

	/*
	 * Sanity checks on target address. It's permitted to write
	 * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
	 * write at the beginning of a new erase block. Anything else,
	 * and you die. New block starts at xxx000c (0-b = block
	 * header)
	 */
	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
		/* It's a write to a new block */
		if (c->wbuf_len) {
			jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
				  __func__, (unsigned long)to, c->wbuf_ofs);
			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
			if (ret)
				goto outerr;
		}
		/* set pointer to new block */
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
	}

	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
		/* We're not writing immediately after the writebuffer. Bad. */
		pr_crit("%s(): Non-contiguous write to %08lx\n",
			__func__, (unsigned long)to);
		if (c->wbuf_len)
			pr_crit("wbuf was previously %08x-%08x\n",
				c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len);
		BUG();
	}

	/* adjust alignment offset */
	if (c->wbuf_len != PAGE_MOD(to)) {
		c->wbuf_len = PAGE_MOD(to);
		/* take care of alignment to next page */
		if (!c->wbuf_len) {
			c->wbuf_len = c->wbuf_pagesize;
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}
	}

	for (invec = 0; invec < count; invec++) {
		int vlen = invecs[invec].iov_len;
		uint8_t *v = invecs[invec].iov_base;

		/* Top up the current wbuf page with the head of this vector. */
		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);

		if (c->wbuf_len == c->wbuf_pagesize) {
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}
		vlen -= wbuf_retlen;
		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
		v += wbuf_retlen;

		/* Whole pages in the middle of the vector bypass the wbuf. */
		if (vlen >= c->wbuf_pagesize) {
			ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen),
					&wbuf_retlen, v);
			/* NOTE(review): a short write with ret == 0 falls
			   through to outerr and returns 0 — preserved from
			   the original; confirm whether -EIO is wanted. */
			if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
				goto outfile;

			vlen -= wbuf_retlen;
			outvec_to += wbuf_retlen;
			c->wbuf_ofs = outvec_to;
			donelen += wbuf_retlen;
			v += wbuf_retlen;
		}

		/* Stash the sub-page tail of the vector in the wbuf. */
		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
		if (c->wbuf_len == c->wbuf_pagesize) {
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}

		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
	}

	/*
	 * If there's a remainder in the wbuf and it's a non-GC write,
	 * remember that the wbuf affects this ino
	 */
	*retlen = donelen;

	if (jffs2_sum_active()) {
		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
		if (res) {
			/* Must not return with c->wbuf_sem still held. */
			up_write(&c->wbuf_sem);
			return res;
		}
	}

	if (c->wbuf_len && ino)
		jffs2_wbuf_dirties_inode(c, ino);

	ret = 0;
	up_write(&c->wbuf_sem);
	return ret;

outfile:
	/*
	 * At this point we have no problem, c->wbuf is empty. However
	 * refile nextblock to avoid writing again to same address.
	 */

	spin_lock(&c->erase_completion_lock);

	jeb = &c->blocks[outvec_to / c->sector_size];
	jffs2_block_refile(c, jeb, REFILE_ANYWAY);

	spin_unlock(&c->erase_completion_lock);

outerr:
	*retlen = 0;
	up_write(&c->wbuf_sem);
	return ret;
}
9361da177e4SLinus Torvalds
9371da177e4SLinus Torvalds /*
9381da177e4SLinus Torvalds * This is the entry for flash write.
9391da177e4SLinus Torvalds * Check, if we work on NAND FLASH, if so build an kvec and write it via vritev
9401da177e4SLinus Torvalds */
/* Single-buffer write entry point: wraps @buf in a one-element kvec and
 * routes it through the write-buffered vector path, or writes directly
 * when no write buffer is in use. */
int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
		      size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_write(c, ofs, len, retlen, buf);

	vec.iov_base = (unsigned char *) buf;
	vec.iov_len = len;

	return jffs2_flash_writev(c, &vec, 1, ofs, retlen, 0);
}
9531da177e4SLinus Torvalds
9541da177e4SLinus Torvalds /*
9551da177e4SLinus Torvalds Handle readback from writebuffer and ECC failure return
9561da177e4SLinus Torvalds */
int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
{
	loff_t	orbf = 0, owbf = 0, lwbf = 0;
	int	ret;

	if (!jffs2_is_writebuffered(c))
		return mtd_read(c->mtd, ofs, len, retlen, buf);

	/* Read flash */
	down_read(&c->wbuf_sem);
	ret = mtd_read(c->mtd, ofs, len, retlen, buf);

	if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
		if (ret == -EBADMSG)
			pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
				len, ofs);
		/*
		 * We have the raw data without ECC correction in the buffer,
		 * maybe we are lucky and all data or parts are correct. We
		 * check the node. If data are corrupted node check will sort
		 * it out. We keep this block, it will fail on write or erase
		 * and the we mark it bad. Or should we do that now? But we
		 * should give him a chance. Maybe we had a system crash or
		 * power loss before the ecc write or a erase was completed.
		 * So we return success. :)
		 */
		ret = 0;
	}

	/* The remainder overlays any portion of the read that is still
	 * sitting in the write buffer (i.e. not yet on flash) onto @buf. */

	/* if no writebuffer available or write buffer empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read in a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;

	if (ofs >= c->wbuf_ofs) {
		/* Read starts inside the buffered region. */
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		/* Read starts before the buffered region; overlay its tail. */
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* is write beyond write buffer ? */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	if (lwbf > 0)
		memcpy(buf+orbf,c->wbuf+owbf,lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}
10161da177e4SLinus Torvalds
#define NR_OOB_SCAN_PAGES 4

/* For historical reasons we use only 8 bytes for OOB clean marker */
#define OOB_CM_SIZE 8

/* The cleanmarker node written into (and expected in) the OOB area of a
 * freshly-erased NAND eraseblock; compared with memcmp() over the first
 * OOB_CM_SIZE bytes, remaining fields stay zero. */
static const struct jffs2_unknown_node oob_cleanmarker =
{
	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen = constant_cpu_to_je32(8)
};
1028a7a6ace1SArtem Bityutskiy
10291da177e4SLinus Torvalds /*
1030a7a6ace1SArtem Bityutskiy * Check, if the out of band area is empty. This function knows about the clean
1031a7a6ace1SArtem Bityutskiy * marker and if it is present in OOB, treats the OOB as empty anyway.
10321da177e4SLinus Torvalds */
/*
 * @mode: non-zero means a cleanmarker is expected, so the first cmlen OOB
 *        bytes are skipped when checking for emptiness.
 * Returns 0 if the OOB area is empty, 1 if it is not, negative errno on
 * read failure.
 */
int jffs2_check_oob_empty(struct jffs2_sb_info *c,
			  struct jffs2_eraseblock *jeb, int mode)
{
	int i, ret;
	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
	struct mtd_oob_ops ops = { };

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
	ops.oobbuf = c->oobbuf;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
	/* Correctable bitflips are tolerated; everything else is fatal. */
	if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		if (!ret || mtd_is_bitflip(ret))
			ret = -EIO;
		return ret;
	}

	for(i = 0; i < ops.ooblen; i++) {
		if (mode && i < cmlen)
			/* Yeah, we know about the cleanmarker */
			continue;

		if (ops.oobbuf[i] != 0xFF) {
			jffs2_dbg(2, "Found %02x at %x in OOB for "
				  "%08x\n", ops.oobbuf[i], i, jeb->offset);
			return 1;
		}
	}

	return 0;
}
10691da177e4SLinus Torvalds
10701da177e4SLinus Torvalds /*
1071a7a6ace1SArtem Bityutskiy * Check for a valid cleanmarker.
1072a7a6ace1SArtem Bityutskiy * Returns: 0 if a valid cleanmarker was found
1073a7a6ace1SArtem Bityutskiy * 1 if no cleanmarker was found
1074a7a6ace1SArtem Bityutskiy * negative error code if an error occurred
10751da177e4SLinus Torvalds */
int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb)
{
	struct mtd_oob_ops ops = { };
	int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);

	/* Read just the cleanmarker-sized prefix of the OOB area. */
	ops.mode = MTD_OPS_AUTO_OOB;
	ops.ooblen = cmlen;
	ops.oobbuf = c->oobbuf;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
	/* Correctable bitflips are tolerated; everything else is fatal. */
	if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		if (!ret || mtd_is_bitflip(ret))
			ret = -EIO;
		return ret;
	}

	/* 0 = valid cleanmarker present, 1 = no cleanmarker. */
	return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
}
10991da177e4SLinus Torvalds
/*
 * Write the OOB cleanmarker into the first page of a freshly-erased
 * eraseblock. Returns 0 on success, negative errno on failure.
 */
int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb)
{
	int ret;
	struct mtd_oob_ops ops = { };
	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.ooblen = cmlen;
	ops.oobbuf = (uint8_t *)&oob_cleanmarker;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
	if (ret || ops.oobretlen != ops.ooblen) {
		pr_err("cannot write OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		/* Short write without an error code still counts as -EIO. */
		if (!ret)
			ret = -EIO;
		return ret;
	}

	return 0;
}
11241da177e4SLinus Torvalds
11251da177e4SLinus Torvalds /*
11261da177e4SLinus Torvalds * On NAND we try to mark this block bad. If the block was erased more
112725985edcSLucas De Marchi * than MAX_ERASE_FAILURES we mark it finally bad.
11281da177e4SLinus Torvalds * Don't care about failures. This block remains on the erase-pending
11291da177e4SLinus Torvalds * or badblock list as long as nobody manipulates the flash with
11301da177e4SLinus Torvalds * a bootloader or something like that.
11311da177e4SLinus Torvalds */
11321da177e4SLinus Torvalds
int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
	int err;

	/* Tolerate up to MAX_ERASE_FAILURES failures before giving up on
	   the block; below that threshold just bump the counter. */
	jeb->bad_count++;
	if (jeb->bad_count < MAX_ERASE_FAILURES)
		return 0;

	pr_warn("marking eraseblock at %08x as bad\n", bad_offset);

	err = mtd_block_markbad(c->mtd, bad_offset);
	if (err) {
		jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
			  __func__, jeb->offset, err);
		return err;
	}

	/* 1 tells the caller the block has now been marked bad. */
	return 1;
}
11511da177e4SLinus Torvalds
work_to_sb(struct work_struct * work)11528bdc81c5SArtem Bityutskiy static struct jffs2_sb_info *work_to_sb(struct work_struct *work)
11538bdc81c5SArtem Bityutskiy {
11548bdc81c5SArtem Bityutskiy struct delayed_work *dwork;
11558bdc81c5SArtem Bityutskiy
115643584c1dSGeliang Tang dwork = to_delayed_work(work);
11578bdc81c5SArtem Bityutskiy return container_of(dwork, struct jffs2_sb_info, wbuf_dwork);
11588bdc81c5SArtem Bityutskiy }
11598bdc81c5SArtem Bityutskiy
/* Delayed-work handler: flush the write buffer via GC unless the
 * filesystem is mounted read-only. */
static void delayed_wbuf_sync(struct work_struct *work)
{
	struct jffs2_sb_info *c = work_to_sb(work);
	struct super_block *sb = OFNI_BS_2SFFJ(c);

	if (sb_rdonly(sb))
		return;

	jffs2_dbg(1, "%s()\n", __func__);
	jffs2_flush_wbuf_gc(c, 0);
}
11708bdc81c5SArtem Bityutskiy
jffs2_dirty_trigger(struct jffs2_sb_info * c)11718bdc81c5SArtem Bityutskiy void jffs2_dirty_trigger(struct jffs2_sb_info *c)
11728bdc81c5SArtem Bityutskiy {
11738bdc81c5SArtem Bityutskiy struct super_block *sb = OFNI_BS_2SFFJ(c);
11748bdc81c5SArtem Bityutskiy unsigned long delay;
11758bdc81c5SArtem Bityutskiy
1176bc98a42cSDavid Howells if (sb_rdonly(sb))
11778bdc81c5SArtem Bityutskiy return;
11788bdc81c5SArtem Bityutskiy
11798bdc81c5SArtem Bityutskiy delay = msecs_to_jiffies(dirty_writeback_interval * 10);
118099358a1cSAl Viro if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay))
118199358a1cSAl Viro jffs2_dbg(1, "%s()\n", __func__);
11828bdc81c5SArtem Bityutskiy }
11838bdc81c5SArtem Bityutskiy
jffs2_nand_flash_setup(struct jffs2_sb_info * c)1184a7a6ace1SArtem Bityutskiy int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
11851da177e4SLinus Torvalds {
11861da177e4SLinus Torvalds if (!c->mtd->oobsize)
11871da177e4SLinus Torvalds return 0;
11881da177e4SLinus Torvalds
11891da177e4SLinus Torvalds /* Cleanmarker is out-of-band, so inline size zero */
11901da177e4SLinus Torvalds c->cleanmarker_size = 0;
11911da177e4SLinus Torvalds
1192f5b8aa78SBoris BREZILLON if (c->mtd->oobavail == 0) {
1193da320f05SJoe Perches pr_err("inconsistent device description\n");
11941da177e4SLinus Torvalds return -EINVAL;
11951da177e4SLinus Torvalds }
11965bd34c09SThomas Gleixner
11975a528957SJoe Perches jffs2_dbg(1, "using OOB on NAND\n");
11985bd34c09SThomas Gleixner
1199f5b8aa78SBoris BREZILLON c->oobavail = c->mtd->oobavail;
12001da177e4SLinus Torvalds
12011da177e4SLinus Torvalds /* Initialise write buffer */
12021da177e4SLinus Torvalds init_rwsem(&c->wbuf_sem);
12038bdc81c5SArtem Bityutskiy INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
120428318776SJoern Engel c->wbuf_pagesize = c->mtd->writesize;
12051da177e4SLinus Torvalds c->wbuf_ofs = 0xFFFFFFFF;
12061da177e4SLinus Torvalds
12071da177e4SLinus Torvalds c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
12081da177e4SLinus Torvalds if (!c->wbuf)
12091da177e4SLinus Torvalds return -ENOMEM;
12101da177e4SLinus Torvalds
12116da2ec56SKees Cook c->oobbuf = kmalloc_array(NR_OOB_SCAN_PAGES, c->oobavail, GFP_KERNEL);
1212a7a6ace1SArtem Bityutskiy if (!c->oobbuf) {
12131da177e4SLinus Torvalds kfree(c->wbuf);
12141da177e4SLinus Torvalds return -ENOMEM;
12151da177e4SLinus Torvalds }
1216a7a6ace1SArtem Bityutskiy
1217a6bc432eSDavid Woodhouse #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1218a6bc432eSDavid Woodhouse c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1219a6bc432eSDavid Woodhouse if (!c->wbuf_verify) {
1220a6bc432eSDavid Woodhouse kfree(c->oobbuf);
1221a6bc432eSDavid Woodhouse kfree(c->wbuf);
1222a6bc432eSDavid Woodhouse return -ENOMEM;
1223a6bc432eSDavid Woodhouse }
1224a6bc432eSDavid Woodhouse #endif
1225a7a6ace1SArtem Bityutskiy return 0;
12261da177e4SLinus Torvalds }
12271da177e4SLinus Torvalds
jffs2_nand_flash_cleanup(struct jffs2_sb_info * c)12281da177e4SLinus Torvalds void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
12291da177e4SLinus Torvalds {
1230a6bc432eSDavid Woodhouse #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1231a6bc432eSDavid Woodhouse kfree(c->wbuf_verify);
1232a6bc432eSDavid Woodhouse #endif
12331da177e4SLinus Torvalds kfree(c->wbuf);
12348593fbc6SThomas Gleixner kfree(c->oobbuf);
12351da177e4SLinus Torvalds }
12361da177e4SLinus Torvalds
jffs2_dataflash_setup(struct jffs2_sb_info * c)12378f15fd55SAndrew Victor int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
12388f15fd55SAndrew Victor c->cleanmarker_size = 0; /* No cleanmarkers needed */
12398f15fd55SAndrew Victor
12408f15fd55SAndrew Victor /* Initialize write buffer */
12418f15fd55SAndrew Victor init_rwsem(&c->wbuf_sem);
12428bdc81c5SArtem Bityutskiy INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
1243daba5cc4SArtem B. Bityutskiy c->wbuf_pagesize = c->mtd->erasesize;
1244daba5cc4SArtem B. Bityutskiy
1245daba5cc4SArtem B. Bityutskiy /* Find a suitable c->sector_size
1246daba5cc4SArtem B. Bityutskiy * - Not too much sectors
1247daba5cc4SArtem B. Bityutskiy * - Sectors have to be at least 4 K + some bytes
1248daba5cc4SArtem B. Bityutskiy * - All known dataflashes have erase sizes of 528 or 1056
1249daba5cc4SArtem B. Bityutskiy * - we take at least 8 eraseblocks and want to have at least 8K size
1250daba5cc4SArtem B. Bityutskiy * - The concatenation should be a power of 2
1251daba5cc4SArtem B. Bityutskiy */
1252daba5cc4SArtem B. Bityutskiy
1253daba5cc4SArtem B. Bityutskiy c->sector_size = 8 * c->mtd->erasesize;
1254daba5cc4SArtem B. Bityutskiy
1255daba5cc4SArtem B. Bityutskiy while (c->sector_size < 8192) {
1256daba5cc4SArtem B. Bityutskiy c->sector_size *= 2;
1257daba5cc4SArtem B. Bityutskiy }
1258daba5cc4SArtem B. Bityutskiy
1259daba5cc4SArtem B. Bityutskiy /* It may be necessary to adjust the flash size */
1260daba5cc4SArtem B. Bityutskiy c->flash_size = c->mtd->size;
1261daba5cc4SArtem B. Bityutskiy
1262daba5cc4SArtem B. Bityutskiy if ((c->flash_size % c->sector_size) != 0) {
1263daba5cc4SArtem B. Bityutskiy c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
12645a528957SJoe Perches pr_warn("flash size adjusted to %dKiB\n", c->flash_size);
1265eac44a5eSAndrew Morton }
1266daba5cc4SArtem B. Bityutskiy
1267daba5cc4SArtem B. Bityutskiy c->wbuf_ofs = 0xFFFFFFFF;
12688f15fd55SAndrew Victor c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
12698f15fd55SAndrew Victor if (!c->wbuf)
12708f15fd55SAndrew Victor return -ENOMEM;
12718f15fd55SAndrew Victor
1272cca15841Smichael #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1273cca15841Smichael c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1274cca15841Smichael if (!c->wbuf_verify) {
1275cca15841Smichael kfree(c->wbuf);
1276cca15841Smichael return -ENOMEM;
1277cca15841Smichael }
1278cca15841Smichael #endif
1279cca15841Smichael
12805a528957SJoe Perches pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
1281da320f05SJoe Perches c->wbuf_pagesize, c->sector_size);
12828f15fd55SAndrew Victor
12838f15fd55SAndrew Victor return 0;
12848f15fd55SAndrew Victor }
12858f15fd55SAndrew Victor
jffs2_dataflash_cleanup(struct jffs2_sb_info * c)12868f15fd55SAndrew Victor void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1287cca15841Smichael #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1288cca15841Smichael kfree(c->wbuf_verify);
1289cca15841Smichael #endif
12908f15fd55SAndrew Victor kfree(c->wbuf);
12918f15fd55SAndrew Victor }
12928f15fd55SAndrew Victor
jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info * c)129359da721aSNicolas Pitre int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1294c8b229deSJoern Engel /* Cleanmarker currently occupies whole programming regions,
1295c8b229deSJoern Engel * either one or 2 for 8Byte STMicro flashes. */
1296c8b229deSJoern Engel c->cleanmarker_size = max(16u, c->mtd->writesize);
129759da721aSNicolas Pitre
129859da721aSNicolas Pitre /* Initialize write buffer */
129959da721aSNicolas Pitre init_rwsem(&c->wbuf_sem);
13008bdc81c5SArtem Bityutskiy INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
13018bdc81c5SArtem Bityutskiy
130228318776SJoern Engel c->wbuf_pagesize = c->mtd->writesize;
130359da721aSNicolas Pitre c->wbuf_ofs = 0xFFFFFFFF;
130459da721aSNicolas Pitre
130559da721aSNicolas Pitre c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
130659da721aSNicolas Pitre if (!c->wbuf)
130759da721aSNicolas Pitre return -ENOMEM;
130859da721aSNicolas Pitre
1309bc8cec0dSMassimo Cirillo #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1310bc8cec0dSMassimo Cirillo c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1311bc8cec0dSMassimo Cirillo if (!c->wbuf_verify) {
1312bc8cec0dSMassimo Cirillo kfree(c->wbuf);
1313bc8cec0dSMassimo Cirillo return -ENOMEM;
1314bc8cec0dSMassimo Cirillo }
1315bc8cec0dSMassimo Cirillo #endif
131659da721aSNicolas Pitre return 0;
131759da721aSNicolas Pitre }
131859da721aSNicolas Pitre
jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info * c)131959da721aSNicolas Pitre void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1320bc8cec0dSMassimo Cirillo #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1321bc8cec0dSMassimo Cirillo kfree(c->wbuf_verify);
1322bc8cec0dSMassimo Cirillo #endif
132359da721aSNicolas Pitre kfree(c->wbuf);
132459da721aSNicolas Pitre }
13250029da3bSArtem Bityutskiy
jffs2_ubivol_setup(struct jffs2_sb_info * c)13260029da3bSArtem Bityutskiy int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
13270029da3bSArtem Bityutskiy c->cleanmarker_size = 0;
13280029da3bSArtem Bityutskiy
13290029da3bSArtem Bityutskiy if (c->mtd->writesize == 1)
13300029da3bSArtem Bityutskiy /* We do not need write-buffer */
13310029da3bSArtem Bityutskiy return 0;
13320029da3bSArtem Bityutskiy
13330029da3bSArtem Bityutskiy init_rwsem(&c->wbuf_sem);
13348bdc81c5SArtem Bityutskiy INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
13350029da3bSArtem Bityutskiy
13360029da3bSArtem Bityutskiy c->wbuf_pagesize = c->mtd->writesize;
13370029da3bSArtem Bityutskiy c->wbuf_ofs = 0xFFFFFFFF;
13380029da3bSArtem Bityutskiy c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
13390029da3bSArtem Bityutskiy if (!c->wbuf)
13400029da3bSArtem Bityutskiy return -ENOMEM;
13410029da3bSArtem Bityutskiy
13425a528957SJoe Perches pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
1343da320f05SJoe Perches c->wbuf_pagesize, c->sector_size);
13440029da3bSArtem Bityutskiy
13450029da3bSArtem Bityutskiy return 0;
13460029da3bSArtem Bityutskiy }
13470029da3bSArtem Bityutskiy
/* Free the write buffer allocated by jffs2_ubivol_setup().
 * kfree(NULL) is a no-op, so this is safe if setup skipped the
 * allocation (writesize == 1) or never ran. */
void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}
1351