xref: /openbmc/linux/fs/jffs2/nodemgmt.c (revision fcb75787)
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: nodemgmt.c,v 1.127 2005/09/20 15:49:12 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@ofs: Returned value of node offset
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *	@sumsize: Size of the summary entry for this node, or
 *		  JFFS2_SUMMARY_NOSUM_SIZE to suppress summary collection
 *
 *	Requests a block of physical space on the flash. Returns zero for success
 *	and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
 *	or other error if appropriate.
 *
 *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by jffs2_complete_reservation().
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */
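/*
 * A rough usage sketch (illustrative only; the real callers live in
 * write.c and gc.c, and the right sumsize depends on the node type
 * being written):
 *
 *	uint32_t ofs, len;
 *	int ret;
 *
 *	ret = jffs2_reserve_space(c, sizeof(ri) + datalen, &ofs, &len,
 *				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	... write at most 'len' bytes of node data to flash at 'ofs' ...
 *	jffs2_complete_reservation(c);
 */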

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
					uint32_t *ofs, uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* Align the request: PAD() rounds minsize up to a 4-byte boundary */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			int ret;
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list;
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space,
			 * but it is still counted in c->nr_erasing_blocks, so we add c->erasing_size
			 * and subtract c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
			 * This helps us to force GC and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as GC first finishes the checking
			 * of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
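			/* Worked example (hypothetical numbers, 64KiB sectors):
			 * dirty_size = 0x30000, erasing_size = 0x20000 with
			 * nr_erasing_blocks = 2, unchecked_size = 0x10000 gives
			 * dirty = 0x30000 + 0x20000 - 2 * 0x10000 + 0x10000
			 *       = 0x40000.
			 */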
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x (including unchecked_size 0x%08x) < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know whether unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This will affect the sum only once, as GC first finishes the checking
			 * of nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than or equal to
			 * blocksneeded * sector_size.
			 * This prevents endless GC looping on a filesystem which is nearly full, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}
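			/* E.g. with blocksneeded = 5 (hypothetical) and 64KiB
			 * sectors, any avail below 6 * 0x10000 makes
			 * avail / sector_size <= 5, so we return -ENOSPC above. */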

			up(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (ret)
		up(&c->alloc_sem);
	return ret;
}

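/* Like jffs2_reserve_space(), but for use from within the garbage
   collector, which already holds c->alloc_sem and must not trigger
   another GC pass to make room. */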
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs,
			uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	return ret;
}


/* Classify nextblock (clean, dirty or verydirty) and force selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY(jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
		  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;

}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_del(&ejeb->list);
			list_add_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
				  ejeb->offset));
		}

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_pending_wbuf_list)) {
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				   c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				   list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;	/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
							/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d, jeb->free=%d, "
						"summary->size=%d, sumsize=%d\n",
						minsize, jeb->free_size,
						c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do we
		   have to write out the summary information now, close this jeb and
		   select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Write out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary
				   information; disable summary for this jeb and free the
				   collected information
				 */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			c->wasted_size += jeb->free_size;
			c->free_size -= jeb->free_size;
			jeb->wasted_size += jeb->free_size;
			jeb->free_size = 0;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
	return 0;
}

/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@new: new node reference to add
 *	@len: length of this physical node
 *	@ic: inode cache of the inode this node belongs to, if any
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space().
 *
 *	Must be called with the alloc_sem held.
 */
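/*
 * Sketch of a typical caller sequence (simplified; names such as
 * 'raw', 'ofs' and 'totlen' are illustrative, not from this file):
 *
 *	raw = jffs2_alloc_raw_node_ref();
 *	raw->flash_offset = ofs | REF_NORMAL;	(or REF_UNCHECKED for a
 *						 node not yet CRC-checked)
 *	raw->next_phys = NULL;
 *	ret = jffs2_add_physical_node_ref(c, raw, PAD(totlen), ic);
 */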

int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new,
				uint32_t len, struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;

	jeb = &c->blocks[new->flash_offset / c->sector_size];
#ifdef TEST_TOTLEN
	new->__totlen = len;
#endif

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
#if 1
	/* we could get some obsolete nodes after nextblock was refiled
	   in wbuf.c */
	if ((c->nextblock || !ref_obsolete(new))
	    && (jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place\n");
		jffs2_free_raw_node_ref(new);
		return -EINVAL;
	}
#endif
	spin_lock(&c->erase_completion_lock);

	jffs2_link_node_ref(c, jeb, new, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return 0;
}


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk(KERN_DEBUG "%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if (!ref) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Take care that wasted space is accounted for here as well */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		D1(printk(KERN_DEBUG "Dirtying\n"));
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk(KERN_DEBUG "Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
		(c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_all_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		if (ic->nodes == (void *)ic && ic->nlink == 0)
			jffs2_del_ino_cache(c, ic);

		spin_unlock(&c->erase_completion_lock);
	}


	/* Merge with the next node in the physical list, if there is one
	   and if it's also obsolete and if it doesn't belong to any inode */
	if (ref->next_phys && ref_obsolete(ref->next_phys) &&
	    !ref->next_phys->next_in_ino) {
		struct jffs2_raw_node_ref *n = ref->next_phys;

		spin_lock(&c->erase_completion_lock);

#ifdef TEST_TOTLEN
		ref->__totlen += n->__totlen;
#endif
		ref->next_phys = n->next_phys;
		if (jeb->last_node == n)
			jeb->last_node = ref;
		if (jeb->gc_node == n) {
			/* gc will be happy continuing gc on this node */
			jeb->gc_node = ref;
		}
		spin_unlock(&c->erase_completion_lock);

		jffs2_free_raw_node_ref(n);
	}

	/* Also merge with the previous node in the list, if there is one
	   and that one is obsolete */
	if (ref != jeb->first_node) {
		struct jffs2_raw_node_ref *p = jeb->first_node;

		spin_lock(&c->erase_completion_lock);

		while (p->next_phys != ref)
			p = p->next_phys;

		if (ref_obsolete(p) && !ref->next_in_ino) {
#ifdef TEST_TOTLEN
			p->__totlen += ref->__totlen;
#endif
			if (jeb->last_node == ref) {
				jeb->last_node = p;
			}
			if (jeb->gc_node == ref) {
				/* gc will be happy continuing gc on this node */
				jeb->gc_node = p;
			}
			p->next_phys = ref->next_phys;
			jffs2_free_raw_node_ref(ref);
		}
		spin_unlock(&c->erase_completion_lock);
	}
 out_erase_sem:
	up(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list;
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space,
	 * but it is still counted in c->nr_erasing_blocks, so we add c->erasing_size
	 * and subtract c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
	 * This helps us to force GC and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
			(dirty > c->nospc_dirty_size))
		ret = 1;
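	/* Example (hypothetical numbers): with resv_blocks_gctrigger = 6,
	 * 4 free + 1 erasing blocks and dirty above nospc_dirty_size,
	 * 4 + 1 < 6 holds, so the GC thread is woken. */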

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));

	return ret;
}
759