xref: /openbmc/linux/fs/jffs2/nodemgmt.c (revision 5a528957)
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *	@sumsize: summary size requested, or JFFS2_SUMMARY_NOSUM_SIZE if no
 *	summary is to be collected
 *
 *	Requests a block of physical space on the flash. Returns zero for success
 *	and puts 'len' into the appropriate place, or returns -ENOSPC or other
 *	error if appropriate. Doesn't return len itself, since that is passed
 *	back through @len.
 *
 *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by jffs2_complete_reservation().
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */

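/*
 * Illustrative usage sketch (not part of this file): a writer is expected to
 * pair each successful reservation with jffs2_complete_reservation(). The
 * local names here (ri, datalen, alloclen) are hypothetical.
 *
 *	uint32_t alloclen;
 *	int ret;
 *
 *	ret = jffs2_reserve_space(c, sizeof(ri) + datalen, &alloclen,
 *				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	... write at most 'alloclen' bytes into the reserved space, report
 *	the new node with jffs2_add_physical_node_ref(), then ...
 *	jffs2_complete_reservation(c);
 */
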
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
	mutex_lock(&c->alloc_sem);

	jffs2_dbg(1, "%s(): alloc sem got\n", __func__);

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
			 * with c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
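			/* Illustrative numbers (assumed, not taken from the code):
			 * with sector_size 0x10000, dirty_size 0x8000 and one block
			 * mid-erase (nr_erasing_blocks == 1, erasing_size 0x10000),
			 * the in-flight block cancels out of the sum:
			 * dirty = 0x8000 + 0x10000 - 1 * 0x10000 + unchecked_size.
			 */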
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}
				jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size,
					  c->nospc_dirty_size);

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. "Possibly available" means that we
			 * don't know whether unchecked size contains obsoleted nodes, which could
			 * give us some more usable space. This will affect the sum only once, as
			 * gc first finishes checking of nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than or
			 * equal to blocksneeded * sector_size.
			 * This prevents endless gc looping on a nearly full filesystem, even if
			 * the check above passes.
			 */
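			/* Illustrative numbers (assumed): with sector_size 0x10000
			 * and blocksneeded == 5, avail must strictly exceed
			 * 5 * 0x10000 bytes, or we give up with -ENOSPC rather
			 * than looping in GC forever.
			 */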
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}

				jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size);
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			mutex_unlock(&c->alloc_sem);

			jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks,
				  c->free_size, c->dirty_size, c->wasted_size,
				  c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size +
				  c->wasted_size + c->used_size +
				  c->erasing_size + c->bad_size,
				  c->flash_size);
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);

			if (ret == -EAGAIN) {
				spin_lock(&c->erase_completion_lock);
				if (c->nr_erasing_blocks &&
				    list_empty(&c->erase_pending_list) &&
				    list_empty(&c->erase_complete_list)) {
					DECLARE_WAITQUEUE(wait, current);
					set_current_state(TASK_UNINTERRUPTIBLE);
					add_wait_queue(&c->erase_wait, &wait);
					jffs2_dbg(1, "%s waiting for erase to complete\n",
						  __func__);
					spin_unlock(&c->erase_completion_lock);

					schedule();
					remove_wait_queue(&c->erase_wait, &wait);
				} else
					spin_unlock(&c->erase_completion_lock);
			} else if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): looping, ret is %d\n",
				  __func__, ret);
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}

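/*
 * Illustrative GC-path usage (hypothetical locals): the garbage collector is
 * expected to reserve space along the lines of
 *
 *	ret = jffs2_reserve_space_gc(c, sizeof(rd) + name_len, &alloclen,
 *				     JFFS2_SUMMARY_DIRENT_SIZE(name_len));
 *
 * Unlike jffs2_reserve_space(), this variant takes no alloc_sem (the GC pass
 * is expected to hold it already) and never triggers garbage collection
 * itself, since it is called from within GC.
 */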

/* Classify nextblock (clean, dirty or verydirty) and force selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

	if (c->nextblock == NULL) {
		jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
			  __func__, jeb->offset);
		return;
	}
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY(jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;

}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
			jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
				  __func__, ejeb->offset);
		}

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_pending_wbuf_list)) {
			jffs2_dbg(1, "%s(): Flushing write buffer\n",
				  __func__);
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				c->nr_erasing_blocks, c->nr_free_blocks,
				list_empty(&c->erasable_list) ? "yes" : "no",
				list_empty(&c->erasing_list) ? "yes" : "no",
				list_empty(&c->erase_pending_list) ? "yes" : "no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	/* adjust write buffer offset, else we get a non-contiguous write bug */
	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
		c->wbuf_ofs = 0xffffffff;
#endif

	jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
		  __func__, c->nextblock->offset);

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;				/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
							/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
						"summary->size=%d , sumsize=%d\n",
						minsize, jeb->free_size,
						c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do we
		   have to write out summary information now, close this jeb and
		   select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary
				   information; disable summary for this jeb and free the
				   collected information
				 */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				jffs2_dbg(1, "%s(): Flushing write buffer\n",
					  __func__);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
			if (ret)
				return ret;
			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
				jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
		  __func__,
		  *len, jeb->offset + (c->sector_size - jeb->free_size));
	return 0;
}

/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@ofs: flash offset of the new node, with its REF_* state in the
 *	      low two bits
 *	@len: length of this physical node
 *	@ic: inode cache the node belongs to, if any
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space.
 *
 *	Must be called with the alloc_sem held.
 */

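/*
 * Illustrative call (hypothetical context, names as one might use in the
 * write paths): once the node has actually been written into the space
 * handed out by jffs2_reserve_space(), it might be reported with
 *
 *	fn->raw = jffs2_add_physical_node_ref(c, flash_ofs | REF_PRISTINE,
 *					      PAD(sizeof(*ri) + datalen),
 *					      f->inocache);
 *
 * The low two bits of the offset argument carry the new node's REF_* state,
 * as checked below via (ofs & 3).
 */
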
struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
		  __func__, ofs & ~3, ofs & 3, len);
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
			ofs & ~3, ofs & 3);
		if (c->nextblock)
			pr_warn("nextblock 0x%08x", c->nextblock->offset);
		else
			pr_warn("No nextblock");
		pr_cont(", expected at %08x\n",
			jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	jffs2_dbg(1, "jffs2_complete_reservation()\n");
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			jffs2_dbg(1, "%p is on list at %p\n", obj, head);
			return 1;

		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if (unlikely(!ref)) {
		pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
			  __func__, ref_offset(ref));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		pr_notice("raw node at 0x%08x is off the end of device!\n",
			  ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
				  freed_len, blocknr,
				  ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
			  ref_offset(ref), freed_len);
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
				  freed_len, blocknr,
				  ref->flash_offset, jeb->used_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
			  ref_offset(ref), freed_len);
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Make sure that wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		jffs2_dbg(1, "Dirtying\n");
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset);
				addedsize = 0; /* To fool the refiling code later */
			} else {
				jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset);
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		jffs2_dbg(1, "Wasting\n");
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
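	/* The low two bits of flash_offset carry the node's REF_* state;
	 * ref_offset() masks them off, so the assignment below keeps the
	 * node's address and only flips its state bits to REF_OBSOLETE.
	 */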
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
			  jeb->offset);
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
				  jeb->offset);
			c->gcblock = NULL;
		} else {
			jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
				  jeb->offset);
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				jffs2_dbg(1, "...and adding to erase_pending_list\n");
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_garbage_collect_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				jffs2_dbg(1, "...and adding to erasable_list\n");
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		jffs2_dbg(1, "Done OK\n");
	} else if (jeb == c->gcblock) {
		jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
			  jeb->offset);
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to dirty_list\n");
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to very_dirty_list\n");
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
		(c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
		  ref_offset(ref));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
			je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
			  ref_offset(ref), je16_to_cpu(n.nodetype));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
			case RAWNODE_CLASS_XATTR_DATUM:
				jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
				break;
			case RAWNODE_CLASS_XATTR_REF:
				jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
				break;
#endif
			default:
				if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
					jffs2_del_ino_cache(c, ic);
				break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list))
		return 1;

	if (c->unchecked_size) {
		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino);
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
	 * with c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
			(dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
		  c->dirty_size, nr_very_dirty, ret ? "yes" : "no");

	return ret;
}