xref: /openbmc/linux/fs/jffs2/nodemgmt.c (revision c00c310e)
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *	@sumsize: Amount of summary space to reserve for this node
 *		  (JFFS2_SUMMARY_NOSUM_SIZE disables summary collection)
 *
 *	Requests a block of physical space on the flash. Returns zero for success
 *	and puts the allocated length into @len, or returns -ENOSPC or another
 *	error if appropriate.
 *
 *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by jffs2_complete_reservation()
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */
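
/*
 * Typical caller sequence (an illustrative sketch, not code from this file;
 * error handling is elided and 'total_len'/'write_node_to_flash' are
 * placeholder names; see the real callers in write.c):
 *
 *	uint32_t len, ofs;
 *	ret = jffs2_reserve_space(c, total_len, &len, ALLOC_NORMAL,
 *				  JFFS2_SUMMARY_INODE_SIZE);
 *	if (!ret) {
 *		ofs = write_node_to_flash(c);
 *		jffs2_add_physical_node_ref(c, ofs | REF_NORMAL, total_len, ic);
 *		jffs2_complete_reservation(c);
 *	}
 *
 * jffs2_complete_reservation() releases the alloc_sem taken above.
 */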

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			int ret;
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is still counted in c->nr_erasing_blocks, so we add erasing_size
			 * and subtract c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use there.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
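			/* Hypothetical worked example, assuming 64KiB sectors:
			 * dirty_size = 0x30000, two blocks mid-erase
			 * (erasing_size = 0x20000, nr_erasing_blocks = 2) and
			 * unchecked_size = 0x8000 give
			 * dirty = 0x30000 + 0x20000 - 0x20000 + 0x8000 = 0x38000;
			 * the in-flight erases cancel out, so only genuinely
			 * reclaimable space is compared against nospc_dirty_size.
			 */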
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know whether unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than or equal to
			 * blocksneeded * sector_size.
			 * This prevents endless gc looping on a filesystem which is nearly full, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
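			/* Hypothetical example, again with 64KiB sectors: if
			 * avail = 0x50000 then avail / sector_size == 5, so with
			 * resv_blocks_write >= 5 even reclaiming everything
			 * could not satisfy the reservation; give up here
			 * instead of looping in GC forever. */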
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			up(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		up(&c->alloc_sem);
	return ret;
}

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}


/* Classify nextblock (clean, dirty or verydirty) and force the selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	if (c->nextblock == NULL) {
		D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n",
		  jeb->offset));
		return;
	}
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY(jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
		  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;
}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
				  ejeb->offset));
		}

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_pending_wbuf_list)) {
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				   c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				   list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;				/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
						/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
						"summary->size=%d , sumsize=%d\n",
						minsize, jeb->free_size,
						c->summary->sum_size, sumsize);
		}
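		/* (The collected summary is written at the very end of the
		 * eraseblock, so reserved_size covers the entry for this node,
		 * everything collected so far, and the trailing summary frame.) */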

		/* Is there enough space for writing out the current node, or do we have to
		   write out the summary information now, close this jeb and select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary
				   information; disable summary for this jeb and free the
				   collected information */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
			if (ret)
				return ret;
			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
		  *len, jeb->offset + (c->sector_size - jeb->free_size)));
	return 0;
}

/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@ofs: flash offset of the new node, with the REF_* state flags in the
 *	      two least significant bits
 *	@len: length of this physical node
 *	@ic: inode cache to associate the new reference with, or NULL
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space.
 *
 *	Must be called with the alloc_sem held.
 */
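
/*
 * Note on @ofs: the two low bits of a node's flash offset encode its state
 * (REF_UNCHECKED, REF_OBSOLETE, REF_PRISTINE or REF_NORMAL, as defined in
 * nodelist.h), which is why the sanity check below masks with ~3 to recover
 * the real offset and with 3 to recover the flags.
 */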

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
		  ofs & ~3, ofs & 3, len));
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place\n");
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}

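/* Releases the per-filesystem alloc_sem taken by jffs2_reserve_space() and
 * pokes the GC thread, which decides whether more garbage collection is
 * advisable now that the write is complete. */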
void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if(unlikely(!ref)) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Take care that wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		D1(printk("Dirtying\n"));
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk("Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
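	/* Either way, freed_len bytes have moved out of used (or unchecked)
	 * space and into dirty or wasted space, preserving the per-block
	 * invariant that free, used, dirty, wasted and unchecked sizes sum
	 * to sector_size; the paranoia checks below verify this accounting. */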
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time (127 times in 128), we just erase it
				   immediately. Otherwise we spend ages scanning it on
				   mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
		(c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
			case RAWNODE_CLASS_XATTR_DATUM:
				jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
				break;
			case RAWNODE_CLASS_XATTR_REF:
				jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
				break;
#endif
			default:
				if (ic->nodes == (void *)ic && ic->nlink == 0)
					jffs2_del_ino_cache(c, ic);
				break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	up(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is still counted in c->nr_erasing_blocks, so we add erasing_size
	 * and subtract c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
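	/* Same computation as in jffs2_reserve_space() above, minus the
	 * unchecked_size term: only space known to be reclaimable counts
	 * towards waking the GC thread. */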

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
			(dirty > c->nospc_dirty_size))
		ret = 1;

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));

	return ret;
}
746