xref: /openbmc/linux/fs/jffs2/nodemgmt.c (revision da320f05)
1 /*
2  * JFFS2 -- Journalling Flash File System, Version 2.
3  *
4  * Copyright © 2001-2007 Red Hat, Inc.
5  *
6  * Created by David Woodhouse <dwmw2@infradead.org>
7  *
8  * For licensing information, see the file 'LICENCE' in this directory.
9  *
10  */
11 
12 #include <linux/kernel.h>
13 #include <linux/mtd/mtd.h>
14 #include <linux/compiler.h>
15 #include <linux/sched.h> /* For cond_resched() */
16 #include "nodelist.h"
17 #include "debug.h"
18 
19 /**
20  *	jffs2_reserve_space - request physical space to write nodes to flash
21  *	@c: superblock info
22  *	@minsize: Minimum acceptable size of allocation
23  *	@len: Returned value of allocation length
24  *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *	@sumsize: summary record size to reserve (JFFS2_SUMMARY_NOSUM_SIZE means
 *		  no summary is generated)
25  *
26  *	Requests a block of physical space on the flash. Returns zero for success
27  *	and puts 'len' into the appropriate place, or returns -ENOSPC or other
28  *	error if appropriate.
29  *
30  *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
31  *	allocation semaphore, to prevent more than one allocation from being
32  *	active at any time. The semaphore is later released by jffs2_commit_allocation()
33  *	active at any time. The semaphore is later released by jffs2_complete_reservation().
34  *	jffs2_reserve_space() may trigger garbage collection in order to make room
35  *	for the requested allocation.
36  */
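/*
 * A minimal sketch of the usual caller sequence (cf. jffs2_write_dnode() in
 * fs/jffs2/write.c); the variable names here are illustrative only:
 *
 *	uint32_t alloclen;
 *	ret = jffs2_reserve_space(c, PAD(totlen), &alloclen,
 *				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	... write the node at c->nextblock, e.g. via jffs2_flash_writev() ...
 *	jffs2_add_physical_node_ref(c, flash_ofs | REF_NORMAL, PAD(totlen), ic);
 *	jffs2_complete_reservation(c);	... drops c->alloc_sem ...
 */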
37 
38 static int jffs2_do_reserve_space(struct jffs2_sb_info *c,  uint32_t minsize,
39 				  uint32_t *len, uint32_t sumsize);
40 
41 int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
42 			uint32_t *len, int prio, uint32_t sumsize)
43 {
44 	int ret = -EAGAIN;
45 	int blocksneeded = c->resv_blocks_write;
46 	/* align it; PAD() rounds the request up to the 4-byte flash granularity */
47 	minsize = PAD(minsize);
48 
49 	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
50 	mutex_lock(&c->alloc_sem);
51 
52 	jffs2_dbg(1, "%s(): alloc sem got\n", __func__);
53 
54 	spin_lock(&c->erase_completion_lock);
55 
56 	/* this needs a little more thought (true <tglx> :)) */
57 	while(ret == -EAGAIN) {
58 		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
59 			uint32_t dirty, avail;
60 
61 			/* calculate real dirty size
62 			 * dirty_size contains blocks on erase_pending_list;
63 			 * those blocks are counted in c->nr_erasing_blocks.
64 			 * If a block is actually erased, it is no longer counted as dirty_space,
65 			 * but it is still counted in c->nr_erasing_blocks, so we add erasing_size
66 			 * and subtract c->nr_erasing_blocks * c->sector_size again.
67 			 * Blocks on the erasable_list are counted in dirty_size, but not in
68 			 * c->nr_erasing_blocks; this helps us force GC and eventually pick a
69 			 * clean block to spread the load.
70 			 * We add unchecked_size here, as we hopefully will find some space to use.
71 			 * This will affect the sum only once, since GC first finishes checking
72 			 * the nodes. */
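			/* Hypothetical worked example with sector_size 0x10000: one block is
			 * erase-pending (still counted in dirty_size and nr_erasing_blocks) and
			 * one is actually erasing (counted in erasing_size and nr_erasing_blocks).
			 * With dirty_size 0x28000 and unchecked_size 0:
			 *	dirty = 0x28000 + 0x10000 - 2 * 0x10000 + 0 = 0x18000
			 * i.e. only the dirty space not already on its way to erasure counts. */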
73 			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
74 			if (dirty < c->nospc_dirty_size) {
75 				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
76 					jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
77 						  __func__);
78 					break;
79 				}
80 				jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
81 					  dirty, c->unchecked_size,
82 					  c->nospc_dirty_size);
83 
84 				spin_unlock(&c->erase_completion_lock);
85 				mutex_unlock(&c->alloc_sem);
86 				return -ENOSPC;
87 			}
88 
89 			/* Calc possibly available space. "Possibly available" means that we
90 			 * don't know whether the unchecked size contains obsoleted nodes, which
91 			 * could give us some more usable space. This will affect the sum only
92 			 * once, since GC first finishes checking the nodes.
93 			 * Return -ENOSPC if the maximum possibly available space is less than
94 			 * or equal to blocksneeded * sector_size.
95 			 * This prevents endless GC looping on a filesystem which is nearly
96 			 * full, even if the check above passes.
97 			 */
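			/* E.g. (hypothetical numbers): with sector_size 0x10000 and
			 * blocksneeded 5, avail = 0x50000 gives avail / sector_size == 5,
			 * which is <= 5, so we return -ENOSPC rather than GC forever. */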
98 			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
99 			if ((avail / c->sector_size) <= blocksneeded) {
100 				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
101 					jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
102 						  __func__);
103 					break;
104 				}
105 
106 				jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
107 					  avail, blocksneeded * c->sector_size);
108 				spin_unlock(&c->erase_completion_lock);
109 				mutex_unlock(&c->alloc_sem);
110 				return -ENOSPC;
111 			}
112 
113 			mutex_unlock(&c->alloc_sem);
114 
115 			jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
116 				  c->nr_free_blocks, c->nr_erasing_blocks,
117 				  c->free_size, c->dirty_size, c->wasted_size,
118 				  c->used_size, c->erasing_size, c->bad_size,
119 				  c->free_size + c->dirty_size +
120 				  c->wasted_size + c->used_size +
121 				  c->erasing_size + c->bad_size,
122 				  c->flash_size);
123 			spin_unlock(&c->erase_completion_lock);
124 
125 			ret = jffs2_garbage_collect_pass(c);
126 
127 			if (ret == -EAGAIN) {
128 				spin_lock(&c->erase_completion_lock);
129 				if (c->nr_erasing_blocks &&
130 				    list_empty(&c->erase_pending_list) &&
131 				    list_empty(&c->erase_complete_list)) {
132 					DECLARE_WAITQUEUE(wait, current);
133 					set_current_state(TASK_UNINTERRUPTIBLE);
134 					add_wait_queue(&c->erase_wait, &wait);
135 					jffs2_dbg(1, "%s waiting for erase to complete\n",
136 						  __func__);
137 					spin_unlock(&c->erase_completion_lock);
138 
139 					schedule();
140 				} else
141 					spin_unlock(&c->erase_completion_lock);
142 			} else if (ret)
143 				return ret;
144 
145 			cond_resched();
146 
147 			if (signal_pending(current))
148 				return -EINTR;
149 
150 			mutex_lock(&c->alloc_sem);
151 			spin_lock(&c->erase_completion_lock);
152 		}
153 
154 		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
155 		if (ret) {
156 			jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
157 		}
158 	}
159 	spin_unlock(&c->erase_completion_lock);
160 	if (!ret)
161 		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
162 	if (ret)
163 		mutex_unlock(&c->alloc_sem);
164 	return ret;
165 }
166 
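/*
 * GC-context variant of jffs2_reserve_space(): a sketch of the rationale,
 * assuming the caller is the garbage collector (see gc.c), which already
 * holds c->alloc_sem; hence no semaphore handling here, and no triggering
 * of further GC to make room.
 */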
167 int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
168 			   uint32_t *len, uint32_t sumsize)
169 {
170 	int ret = -EAGAIN;
171 	minsize = PAD(minsize);
172 
173 	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
174 
175 	spin_lock(&c->erase_completion_lock);
176 	while(ret == -EAGAIN) {
177 		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
178 		if (ret) {
179 			jffs2_dbg(1, "%s(): looping, ret is %d\n",
180 				  __func__, ret);
181 		}
182 	}
183 	spin_unlock(&c->erase_completion_lock);
184 	if (!ret)
185 		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
186 
187 	return ret;
188 }
189 
190 
191 /* Classify nextblock (clean, dirty or verydirty) and force selection of another one */
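/* Rough thresholds, per the macros in nodelist.h: ISDIRTY() tests whether the
   dirty space is large enough to be worth garbage collecting, and VERYDIRTY()
   whether about half the eraseblock or more is dirty. */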
192 
193 static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
194 {
195 
196 	if (c->nextblock == NULL) {
197 		jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
198 			  __func__, jeb->offset);
199 		return;
200 	}
201 	/* Check if we have a dirty block now, or if it was dirty already */
202 	if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
203 		c->dirty_size += jeb->wasted_size;
204 		c->wasted_size -= jeb->wasted_size;
205 		jeb->dirty_size += jeb->wasted_size;
206 		jeb->wasted_size = 0;
207 		if (VERYDIRTY(c, jeb->dirty_size)) {
208 			jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
209 				  jeb->offset, jeb->free_size, jeb->dirty_size,
210 				  jeb->used_size);
211 			list_add_tail(&jeb->list, &c->very_dirty_list);
212 		} else {
213 			jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
214 				  jeb->offset, jeb->free_size, jeb->dirty_size,
215 				  jeb->used_size);
216 			list_add_tail(&jeb->list, &c->dirty_list);
217 		}
218 	} else {
219 		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
220 			  jeb->offset, jeb->free_size, jeb->dirty_size,
221 			  jeb->used_size);
222 		list_add_tail(&jeb->list, &c->clean_list);
223 	}
224 	c->nextblock = NULL;
225 
226 }
227 
228 /* Select a new jeb for nextblock */
229 
230 static int jffs2_find_nextblock(struct jffs2_sb_info *c)
231 {
232 	struct list_head *next;
233 
234 	/* Take the next block off the 'free' list */
235 
236 	if (list_empty(&c->free_list)) {
237 
238 		if (!c->nr_erasing_blocks &&
239 			!list_empty(&c->erasable_list)) {
240 			struct jffs2_eraseblock *ejeb;
241 
242 			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
243 			list_move_tail(&ejeb->list, &c->erase_pending_list);
244 			c->nr_erasing_blocks++;
245 			jffs2_garbage_collect_trigger(c);
246 			jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
247 				  __func__, ejeb->offset);
248 		}
249 
250 		if (!c->nr_erasing_blocks &&
251 			!list_empty(&c->erasable_pending_wbuf_list)) {
252 			jffs2_dbg(1, "%s(): Flushing write buffer\n",
253 				  __func__);
254 			/* c->nextblock is NULL, no update to c->nextblock allowed */
255 			spin_unlock(&c->erase_completion_lock);
256 			jffs2_flush_wbuf_pad(c);
257 			spin_lock(&c->erase_completion_lock);
258 			/* Have another go. It'll be on the erasable_list now */
259 			return -EAGAIN;
260 		}
261 
262 		if (!c->nr_erasing_blocks) {
263 			/* Ouch. We're in GC, or we wouldn't have got here.
264 			   And there's no space left. At all. */
265 			pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
266 				c->nr_erasing_blocks, c->nr_free_blocks,
267 				list_empty(&c->erasable_list) ? "yes" : "no",
268 				list_empty(&c->erasing_list) ? "yes" : "no",
269 				list_empty(&c->erase_pending_list) ? "yes" : "no");
270 			return -ENOSPC;
271 		}
272 
273 		spin_unlock(&c->erase_completion_lock);
274 		/* Don't wait for it; just erase one right now */
275 		jffs2_erase_pending_blocks(c, 1);
276 		spin_lock(&c->erase_completion_lock);
277 
278 		/* An erase may have failed, decreasing the
279 		   amount of free space available. So we must
280 		   restart from the beginning */
281 		return -EAGAIN;
282 	}
283 
284 	next = c->free_list.next;
285 	list_del(next);
286 	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
287 	c->nr_free_blocks--;
288 
289 	jffs2_sum_reset_collected(c->summary); /* reset collected summary */
290 
291 #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
292 	/* adjust write buffer offset, else we get a non-contiguous write bug */
293 	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
294 		c->wbuf_ofs = 0xffffffff;
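	/* 0xffffffff marks the write buffer as unpositioned, so it gets
	   re-based on the next write (an assumption based on the wbuf_ofs
	   checks in wbuf.c) */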
295 #endif
296 
297 	jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
298 		  __func__, c->nextblock->offset);
299 
300 	return 0;
301 }
302 
303 /* Called with alloc sem _and_ erase_completion_lock */
304 static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
305 				  uint32_t *len, uint32_t sumsize)
306 {
307 	struct jffs2_eraseblock *jeb = c->nextblock;
308 	uint32_t reserved_size;				/* for summary information at the end of the jeb */
309 	int ret;
310 
311  restart:
312 	reserved_size = 0;
313 
314 	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
315 							/* NOSUM_SIZE means not to generate summary */
316 
317 		if (jeb) {
318 			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
319 			dbg_summary("minsize=%d, jeb->free=%d, "
320 						"summary->size=%d, sumsize=%d\n",
321 						minsize, jeb->free_size,
322 						c->summary->sum_size, sumsize);
323 		}
324 
325 		/* Is there enough space for writing out the current node, or do we have to
326 		   write out the summary information now, close this jeb and select a new nextblock? */
327 		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
328 					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {
329 
330 			/* Has summary been disabled for this jeb? */
331 			if (jffs2_sum_is_disabled(c->summary)) {
332 				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
333 				goto restart;
334 			}
335 
336 			/* Writing out the collected summary information */
337 			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
338 			ret = jffs2_sum_write_sumnode(c);
339 
340 			if (ret)
341 				return ret;
342 
343 			if (jffs2_sum_is_disabled(c->summary)) {
344 				/* jffs2_sum_write_sumnode() couldn't write out the summary information;
345 				   disable summary for this jeb and free the collected information
346 				 */
347 				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
348 				goto restart;
349 			}
350 
351 			jffs2_close_nextblock(c, jeb);
352 			jeb = NULL;
353 			/* always keep a valid value in reserved_size */
354 			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
355 		}
356 	} else {
357 		if (jeb && minsize > jeb->free_size) {
358 			uint32_t waste;
359 
360 			/* Skip the end of this block and file it as having some dirty space */
361 			/* If there's a pending write to it, flush now */
362 
363 			if (jffs2_wbuf_dirty(c)) {
364 				spin_unlock(&c->erase_completion_lock);
365 				jffs2_dbg(1, "%s(): Flushing write buffer\n",
366 					  __func__);
367 				jffs2_flush_wbuf_pad(c);
368 				spin_lock(&c->erase_completion_lock);
369 				jeb = c->nextblock;
370 				goto restart;
371 			}
372 
373 			spin_unlock(&c->erase_completion_lock);
374 
375 			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
376 			if (ret)
377 				return ret;
378 			/* Just lock it again and continue. Nothing much can change because
379 			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
380 			   we hold c->erase_completion_lock in the majority of this function...
381 			   but that's a question for another (more caffeine-rich) day. */
382 			spin_lock(&c->erase_completion_lock);
383 
384 			waste = jeb->free_size;
385 			jffs2_link_node_ref(c, jeb,
386 					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
387 					    waste, NULL);
388 			/* FIXME: that made it count as dirty. Convert to wasted */
389 			jeb->dirty_size -= waste;
390 			c->dirty_size -= waste;
391 			jeb->wasted_size += waste;
392 			c->wasted_size += waste;
393 
394 			jffs2_close_nextblock(c, jeb);
395 			jeb = NULL;
396 		}
397 	}
398 
399 	if (!jeb) {
400 
401 		ret = jffs2_find_nextblock(c);
402 		if (ret)
403 			return ret;
404 
405 		jeb = c->nextblock;
406 
407 		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
408 			pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
409 				jeb->offset, jeb->free_size);
410 			goto restart;
411 		}
412 	}
413 	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
414 	   enough space */
415 	*len = jeb->free_size - reserved_size;
416 
417 	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
418 	    !jeb->first_node->next_in_ino) {
419 		/* Only node in it beforehand was a CLEANMARKER node (we think).
420 		   So mark it obsolete now that there's going to be another node
421 		   in the block. This will reduce used_size to zero but we've
422 		   already set c->nextblock so that jffs2_mark_node_obsolete()
423 		   won't try to refile it to the dirty_list.
424 		*/
425 		spin_unlock(&c->erase_completion_lock);
426 		jffs2_mark_node_obsolete(c, jeb->first_node);
427 		spin_lock(&c->erase_completion_lock);
428 	}
429 
430 	jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
431 		  __func__,
432 		  *len, jeb->offset + (c->sector_size - jeb->free_size));
433 	return 0;
434 }
435 
436 /**
437  *	jffs2_add_physical_node_ref - add a physical node reference to the list
438  *	@c: superblock info
439  *	@ofs: flash offset of the node, with the REF_* state in the low two bits
440  *	@len: length of this physical node
441  *	@ic: inode cache the node belongs to, if any
442  *
443  *	Should only be used to report nodes for which space has been allocated by
444  *	jffs2_reserve_space. Returns the new node reference, or an ERR_PTR.
445  *	Must be called with the alloc_sem held.
446  */
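/*
 * Example (sketch, hypothetical values): a 0x100-byte node just written at
 * flash offset 0x40000 would be reported as
 *	jffs2_add_physical_node_ref(c, 0x40000 | REF_NORMAL, 0x100, ic);
 * wbuf.c likewise files its padding nodes with REF_OBSOLETE in the low bits.
 */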
447 
448 struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
449 						       uint32_t ofs, uint32_t len,
450 						       struct jffs2_inode_cache *ic)
451 {
452 	struct jffs2_eraseblock *jeb;
453 	struct jffs2_raw_node_ref *new;
454 
455 	jeb = &c->blocks[ofs / c->sector_size];
456 
457 	jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
458 		  __func__, ofs & ~3, ofs & 3, len);
459 #if 1
460 	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
461 	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
462 	   even after refiling c->nextblock */
463 	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
464 	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
465 		pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
466 			ofs & ~3, ofs & 3);
467 		if (c->nextblock)
468 			pr_warn("nextblock 0x%08x", c->nextblock->offset);
469 		else
470 			pr_warn("No nextblock");
471 		pr_cont(", expected at %08x\n",
472 			jeb->offset + (c->sector_size - jeb->free_size));
473 		return ERR_PTR(-EINVAL);
474 	}
475 #endif
476 	spin_lock(&c->erase_completion_lock);
477 
478 	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);
479 
480 	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
481 		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
482 		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
483 			  jeb->offset, jeb->free_size, jeb->dirty_size,
484 			  jeb->used_size);
485 		if (jffs2_wbuf_dirty(c)) {
486 			/* Flush the last write in the block if it's outstanding */
487 			spin_unlock(&c->erase_completion_lock);
488 			jffs2_flush_wbuf_pad(c);
489 			spin_lock(&c->erase_completion_lock);
490 		}
491 
492 		list_add_tail(&jeb->list, &c->clean_list);
493 		c->nextblock = NULL;
494 	}
495 	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
496 	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
497 
498 	spin_unlock(&c->erase_completion_lock);
499 
500 	return new;
501 }
502 
503 
504 void jffs2_complete_reservation(struct jffs2_sb_info *c)
505 {
506 	jffs2_dbg(1, "jffs2_complete_reservation()\n");
507 	spin_lock(&c->erase_completion_lock);
508 	jffs2_garbage_collect_trigger(c);
509 	spin_unlock(&c->erase_completion_lock);
510 	mutex_unlock(&c->alloc_sem);
511 }
512 
513 static inline int on_list(struct list_head *obj, struct list_head *head)
514 {
515 	struct list_head *this;
516 
517 	list_for_each(this, head) {
518 		if (this == obj) {
519 			jffs2_dbg(1, "%p is on list at %p\n", obj, head);
520 			return 1;
521 
522 		}
523 	}
524 	return 0;
525 }
526 
527 void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
528 {
529 	struct jffs2_eraseblock *jeb;
530 	int blocknr;
531 	struct jffs2_unknown_node n;
532 	int ret, addedsize;
533 	size_t retlen;
534 	uint32_t freed_len;
535 
536 	if(unlikely(!ref)) {
537 		pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
538 		return;
539 	}
540 	if (ref_obsolete(ref)) {
541 		jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
542 			  __func__, ref_offset(ref));
543 		return;
544 	}
545 	blocknr = ref->flash_offset / c->sector_size;
546 	if (blocknr >= c->nr_blocks) {
547 		pr_notice("raw node at 0x%08x is off the end of device!\n",
548 			  ref->flash_offset);
549 		BUG();
550 	}
551 	jeb = &c->blocks[blocknr];
552 
553 	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
554 	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
555 		/* Hm. This may confuse static lock analysis. If any of the above
556 		   three conditions is false, we're going to return from this
557 		   function without actually obliterating any nodes or freeing
558 		   any jffs2_raw_node_refs. So we don't need to stop erases from
559 		   happening, or protect against people holding an obsolete
560 		   jffs2_raw_node_ref without the erase_completion_lock. */
561 		mutex_lock(&c->erase_free_sem);
562 	}
563 
564 	spin_lock(&c->erase_completion_lock);
565 
566 	freed_len = ref_totlen(c, jeb, ref);
567 
568 	if (ref_flags(ref) == REF_UNCHECKED) {
569 		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
570 				pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
571 					  freed_len, blocknr,
572 					  ref->flash_offset, jeb->unchecked_size);
573 			BUG();
574 		})
575 			jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
576 				  ref_offset(ref), freed_len);
577 		jeb->unchecked_size -= freed_len;
578 		c->unchecked_size -= freed_len;
579 	} else {
580 		D1(if (unlikely(jeb->used_size < freed_len)) {
581 				pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
582 					  freed_len, blocknr,
583 					  ref->flash_offset, jeb->used_size);
584 			BUG();
585 		})
586 			jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
587 				  ref_offset(ref), freed_len);
588 		jeb->used_size -= freed_len;
589 		c->used_size -= freed_len;
590 	}
591 
592 	/* Take care that wasted space is taken into account */
593 	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
594 		jffs2_dbg(1, "Dirtying\n");
595 		addedsize = freed_len;
596 		jeb->dirty_size += freed_len;
597 		c->dirty_size += freed_len;
598 
599 		/* Convert wasted space to dirty, if not a bad block */
600 		if (jeb->wasted_size) {
601 			if (on_list(&jeb->list, &c->bad_used_list)) {
602 				jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
603 					  jeb->offset);
604 				addedsize = 0; /* To fool the refiling code later */
605 			} else {
606 				jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
607 					  jeb->wasted_size, jeb->offset);
608 				addedsize += jeb->wasted_size;
609 				jeb->dirty_size += jeb->wasted_size;
610 				c->dirty_size += jeb->wasted_size;
611 				c->wasted_size -= jeb->wasted_size;
612 				jeb->wasted_size = 0;
613 			}
614 		}
615 	} else {
616 		jffs2_dbg(1, "Wasting\n");
617 		addedsize = 0;
618 		jeb->wasted_size += freed_len;
619 		c->wasted_size += freed_len;
620 	}
621 	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
622 
623 	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
624 	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
625 
626 	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
627 		/* Flash scanning is in progress. Don't muck about with the block
628 		   lists because they're not ready yet, and don't actually
629 		   obliterate nodes that look obsolete. If they weren't
630 		   marked obsolete on the flash at the time they _became_
631 		   obsolete, there was probably a reason for that. */
632 		spin_unlock(&c->erase_completion_lock);
633 		/* We didn't lock the erase_free_sem */
634 		return;
635 	}
636 
637 	if (jeb == c->nextblock) {
638 		jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
639 			  jeb->offset);
640 	} else if (!jeb->used_size && !jeb->unchecked_size) {
641 		if (jeb == c->gcblock) {
642 			jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
643 				  jeb->offset);
644 			c->gcblock = NULL;
645 		} else {
646 			jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
647 				  jeb->offset);
648 			list_del(&jeb->list);
649 		}
650 		if (jffs2_wbuf_dirty(c)) {
651 			jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
652 			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
653 		} else {
654 			if (jiffies & 127) {
655 				/* Most of the time, we just erase it immediately. Otherwise we
656 				   spend ages scanning it on mount, etc. */
657 				jffs2_dbg(1, "...and adding to erase_pending_list\n");
658 				list_add_tail(&jeb->list, &c->erase_pending_list);
659 				c->nr_erasing_blocks++;
660 				jffs2_garbage_collect_trigger(c);
661 			} else {
662 				/* Sometimes, however, we leave it elsewhere so it doesn't get
663 				   immediately reused, and we spread the load a bit. */
664 				jffs2_dbg(1, "...and adding to erasable_list\n");
665 				list_add_tail(&jeb->list, &c->erasable_list);
666 			}
667 		}
668 		jffs2_dbg(1, "Done OK\n");
669 	} else if (jeb == c->gcblock) {
670 		jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
671 			  jeb->offset);
672 	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
673 		jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
674 			  jeb->offset);
675 		list_del(&jeb->list);
676 		jffs2_dbg(1, "...and adding to dirty_list\n");
677 		list_add_tail(&jeb->list, &c->dirty_list);
678 	} else if (VERYDIRTY(c, jeb->dirty_size) &&
679 		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
680 		jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
681 			  jeb->offset);
682 		list_del(&jeb->list);
683 		jffs2_dbg(1, "...and adding to very_dirty_list\n");
684 		list_add_tail(&jeb->list, &c->very_dirty_list);
685 	} else {
686 		jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
687 			  jeb->offset, jeb->free_size, jeb->dirty_size,
688 			  jeb->used_size);
689 	}
690 
691 	spin_unlock(&c->erase_completion_lock);
692 
693 	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
694 		(c->flags & JFFS2_SB_FLAG_BUILDING)) {
695 		/* We didn't lock the erase_free_sem */
696 		return;
697 	}
698 
699 	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
700 	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
701 	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
702 	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */
703 
704 	jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
705 		  ref_offset(ref));
706 	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
707 	if (ret) {
708 		pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
709 			ref_offset(ref), ret);
710 		goto out_erase_sem;
711 	}
712 	if (retlen != sizeof(n)) {
713 		pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
714 			ref_offset(ref), retlen);
715 		goto out_erase_sem;
716 	}
717 	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
718 		pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
719 			je32_to_cpu(n.totlen), freed_len);
720 		goto out_erase_sem;
721 	}
722 	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
723 		jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
724 			  ref_offset(ref), je16_to_cpu(n.nodetype));
725 		goto out_erase_sem;
726 	}
727 	/* XXX FIXME: This is ugly now */
728 	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
729 	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
730 	if (ret) {
731 		pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
732 			ref_offset(ref), ret);
733 		goto out_erase_sem;
734 	}
735 	if (retlen != sizeof(n)) {
736 		pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
737 			ref_offset(ref), retlen);
738 		goto out_erase_sem;
739 	}
740 
741 	/* Nodes which have been marked obsolete no longer need to be
742 	   associated with any inode. Remove them from the per-inode list.
743 
744 	   Note we can't do this for NAND at the moment because we need
745 	   obsolete dirent nodes to stay on the lists, because of the
746 	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
747 	   because we delete the inocache, and on NAND we need that to
748 	   stay around until all the nodes are actually erased, in order
749 	   to stop us from giving the same inode number to another newly
750 	   created inode. */
751 	if (ref->next_in_ino) {
752 		struct jffs2_inode_cache *ic;
753 		struct jffs2_raw_node_ref **p;
754 
755 		spin_lock(&c->erase_completion_lock);
756 
757 		ic = jffs2_raw_ref_to_ic(ref);
758 		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
759 			;
760 
761 		*p = ref->next_in_ino;
762 		ref->next_in_ino = NULL;
763 
764 		switch (ic->class) {
765 #ifdef CONFIG_JFFS2_FS_XATTR
766 			case RAWNODE_CLASS_XATTR_DATUM:
767 				jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
768 				break;
769 			case RAWNODE_CLASS_XATTR_REF:
770 				jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
771 				break;
772 #endif
773 			default:
774 				if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
775 					jffs2_del_ino_cache(c, ic);
776 				break;
777 		}
778 		spin_unlock(&c->erase_completion_lock);
779 	}
780 
781  out_erase_sem:
782 	mutex_unlock(&c->erase_free_sem);
783 }
784 
785 int jffs2_thread_should_wake(struct jffs2_sb_info *c)
786 {
787 	int ret = 0;
788 	uint32_t dirty;
789 	int nr_very_dirty = 0;
790 	struct jffs2_eraseblock *jeb;
791 
792 	if (!list_empty(&c->erase_complete_list) ||
793 	    !list_empty(&c->erase_pending_list))
794 		return 1;
795 
796 	if (c->unchecked_size) {
797 		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
798 			  c->unchecked_size, c->checked_ino);
799 		return 1;
800 	}
801 
802 	/* dirty_size contains blocks on erase_pending_list;
803 	 * those blocks are counted in c->nr_erasing_blocks.
804 	 * If a block is actually erased, it is no longer counted as dirty_space,
805 	 * but it is still counted in c->nr_erasing_blocks, so we add erasing_size
806 	 * and subtract c->nr_erasing_blocks * c->sector_size again.
807 	 * Blocks on the erasable_list are counted in dirty_size, but not in
808 	 * c->nr_erasing_blocks; this helps us force GC and eventually pick a
809 	 * clean block to spread the load. */
810 	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
811 
812 	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
813 			(dirty > c->nospc_dirty_size))
814 		ret = 1;
815 
816 	list_for_each_entry(jeb, &c->very_dirty_list, list) {
817 		nr_very_dirty++;
818 		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
819 			ret = 1;
820 			/* In debug mode, actually go through and count them all */
821 			D1(continue);
822 			break;
823 		}
824 	}
825 
826 	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
827 		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
828 		  c->dirty_size, nr_very_dirty, ret ? "yes" : "no");
829 
830 	return ret;
831 }
832