/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *	@sumsize: Amount of summary space to reserve, or
 *		  JFFS2_SUMMARY_NOSUM_SIZE for no summary
 *
 *	Requests a block of physical space on the flash. Returns zero for success
 *	and puts 'len' into the appropriate place, or returns -ENOSPC or other
 *	error if appropriate.
 *
 *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by
 *	jffs2_complete_reservation().
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is still counted in c->nr_erasing_blocks, so we add it and subtract it
			 * again with c->nr_erasing_blocks * c->sector_size.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * the nodes.
			 */
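			/* Worked example (made-up numbers): with 64KiB sectors,
			 * dirty_size = 0x18000, two blocks queued for erase
			 * (erasing_size = 0x20000, nr_erasing_blocks = 2) and
			 * unchecked_size = 0x1000, this gives
			 *   dirty = 0x18000 + 0x20000 - 2*0x10000 + 0x1000 = 0x19000
			 * i.e. the blocks already queued for erase cancel out
			 * of the sum. */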
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x (including unchecked_size 0x%08x) < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}
			/* Calc possibly available space. Possibly available means that we
			 * don't know whether unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This will affect the sum only once, as gc first finishes checking
			 * the nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than or equal to
			 * blocksneeded * sector_size.
			 * This prevents endless gc looping on a filesystem which is nearly full, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			up(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		up(&c->alloc_sem);
	return ret;
}

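/* A typical caller sequence, as a sketch (compare the real users in
 * write.c; 'ri' and 'datalen' here are hypothetical):
 *
 *	uint32_t alloclen;
 *	int ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &alloclen,
 *				      ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	... write at most 'alloclen' bytes, report them with
 *	    jffs2_add_physical_node_ref(), then ...
 *	jffs2_complete_reservation(c);
 */

/* Like jffs2_reserve_space(), but for use from within the garbage
   collector itself: the caller already holds c->alloc_sem and a GC pass
   must not trigger another one, so this variant only loops on
   jffs2_do_reserve_space(). */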
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}

/* Classify nextblock (clean, dirty or verydirty) and force selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

	if (c->nextblock == NULL) {
		D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n",
		  jeb->offset));
		return;
	}
	/* Check whether we have a dirty block now, or whether it was dirty already */
	if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
		  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;

}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
				  ejeb->offset));
		}

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_pending_wbuf_list)) {
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				   c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				   list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
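/* Note that this may drop and retake the locks and return -EAGAIN, in
   which case the callers above simply loop. On success, *len receives
   the usable space in the chosen block, less any space reserved for the
   eraseblock summary. */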
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;				/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
						/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
						"summary->size=%d , sumsize=%d\n",
						minsize, jeb->free_size,
						c->summary->sum_size, sumsize);
		}

		/* Is there enough space to write out the current node, or do we
		   have to write out the summary information now, close this jeb
		   and select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary
				   information: disable summary for this jeb and free the
				   collected information
				 */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
			if (ret)
				return ret;
			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
		  *len, jeb->offset + (c->sector_size - jeb->free_size)));
	return 0;
}

/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@ofs: flash offset of the node, with the REF_ state in the low two bits
 *	@len: length of this physical node
 *	@ic: inode cache which the node belongs to, if any
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space.
 *
 *	Must be called with the alloc_sem held.
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
		  ofs & ~3, ofs & 3, len));
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3);
		if (c->nextblock)
			printk(KERN_WARNING "nextblock 0x%08x", c->nextblock->offset);
		else
			printk(KERN_WARNING "No nextblock");
		printk(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}

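/* Pair to jffs2_reserve_space(): release the allocation semaphore taken
   there, and poke the GC thread in case the activity which needed the
   space also produced enough dirt to make a GC pass worthwhile. */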
void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}

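/* Debug helper: linear scan of @head to report whether @obj is currently
   linked on that list. */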
static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}

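/* Mark @ref obsolete: move its space from the used (or unchecked)
   accounting to dirty or wasted, refile the eraseblock onto the
   appropriate list, and, where the flash type allows marking, clear the
   ACCURATE bit in the node header on the medium itself. */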
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if (unlikely(!ref)) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Make sure that wasted size is taken into account as well */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		D1(printk("Dirtying\n"));
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk("Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
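	/* The low two bits of flash_offset carry the REF_ state, so this
	   flags the in-core reference itself as obsolete */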
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
		(c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
			case RAWNODE_CLASS_XATTR_DATUM:
				jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
				break;
			case RAWNODE_CLASS_XATTR_REF:
				jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
				break;
#endif
			default:
				if (ic->nodes == (void *)ic && ic->nlink == 0)
					jffs2_del_ino_cache(c, ic);
				break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	up(&c->erase_free_sem);
}

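/* Called by the GC background thread (see background.c) to decide whether
   it has work to do: returns 1 if nodes still need checking, if free
   space is low while there is enough dirt to reclaim, or if enough very
   dirty blocks have accumulated. */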
int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is still counted in c->nr_erasing_blocks, so we add it and subtract it
	 * again with c->nr_erasing_blocks * c->sector_size.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
			(dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, nr_very_dirty, ret?"yes":"no"));

	return ret;
}