/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *	@sumsize: Summary size requested, or JFFS2_SUMMARY_NOSUM_SIZE for no summary
 *
 *	Requests a block of physical space on the flash. Returns zero for success
 *	or -ENOSPC or another error if appropriate. The length actually allocated
 *	is returned through @len rather than as the return value.
 *
 *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by
 *	jffs2_complete_reservation().
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */

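/*
 * A minimal sketch of the expected caller pattern (illustrative only;
 * 'write_size' is a stand-in for the size of the node about to be written,
 * and the real callers live in write.c):
 *
 *	uint32_t alloclen;
 *	int ret = jffs2_reserve_space(c, write_size, &alloclen,
 *				      ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	... write at most 'alloclen' bytes to the flash ...
 *	jffs2_complete_reservation(c);
 */
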
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
	mutex_lock(&c->alloc_sem);

	jffs2_dbg(1, "%s(): alloc sem got\n", __func__);

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually being erased, it is no longer counted
			 * as dirty_space but it is still counted in c->nr_erasing_blocks,
			 * so we add it and subtract it again with
			 * c->nr_erasing_blocks * c->sector_size.
			 * Blocks on erasable_list are counted as dirty_size, but not in
			 * c->nr_erasing_blocks. This helps us to force gc and eventually
			 * pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
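			/*
			 * Worked example of the accounting above (illustrative
			 * numbers, 64KiB sectors): two blocks on erase_pending_list
			 * contribute 128KiB to dirty_size and 2 to nr_erasing_blocks;
			 * one block mid-erase contributes 64KiB to erasing_size and 1
			 * to nr_erasing_blocks. dirty_size + erasing_size - 3 * 64KiB
			 * then cancels all three, leaving only the dirty space that is
			 * not already queued for erase (plus unchecked_size).
			 */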
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}
				jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size,
					  c->nospc_dirty_size);

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know, if unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 * Return -ENOSPC, if the maximum possibly available space is less
			 * than or equal to blocksneeded * sector_size.
			 * This blocks endless gc looping on a filesystem, which is nearly full, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ( (avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}

				jffs2_dbg(1, "max. available size 0x%08x  < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size);
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			mutex_unlock(&c->alloc_sem);

			jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks,
				  c->free_size, c->dirty_size, c->wasted_size,
				  c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size +
				  c->wasted_size + c->used_size +
				  c->erasing_size + c->bad_size,
				  c->flash_size);
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);

			if (ret == -EAGAIN) {
				spin_lock(&c->erase_completion_lock);
				if (c->nr_erasing_blocks &&
				    list_empty(&c->erase_pending_list) &&
				    list_empty(&c->erase_complete_list)) {
					DECLARE_WAITQUEUE(wait, current);
					set_current_state(TASK_UNINTERRUPTIBLE);
					add_wait_queue(&c->erase_wait, &wait);
					jffs2_dbg(1, "%s waiting for erase to complete\n",
						  __func__);
					spin_unlock(&c->erase_completion_lock);

					schedule();
					remove_wait_queue(&c->erase_wait, &wait);
				} else
					spin_unlock(&c->erase_completion_lock);
			} else if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}

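/*
 * Variant of jffs2_reserve_space() for use from within garbage collection,
 * where c->alloc_sem is already held by the GC pass; it therefore takes only
 * the erase_completion_lock and never triggers a recursive GC pass.
 */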
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): looping, ret is %d\n",
				  __func__, ret);
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}


/* Classify nextblock (clean, dirty or verydirty) and force the selection of a new one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	if (c->nextblock == NULL) {
		jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
			  __func__, jeb->offset);
		return;
	}
	/* Check whether we have a dirty block now, or if it was dirty already */
	if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;
}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
			jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
				  __func__, ejeb->offset);
		}

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_pending_wbuf_list)) {
			jffs2_dbg(1, "%s(): Flushing write buffer\n",
				  __func__);
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				   c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				   list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	/* adjust write buffer offset, else we get a non-contiguous write bug */
	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
		c->wbuf_ofs = 0xffffffff;
#endif

	jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
		  __func__, c->nextblock->offset);

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;				/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
						/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
						"summary->size=%d , sumsize=%d\n",
						minsize, jeb->free_size,
						c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do we
		   have to write out the summary information now, close this jeb and
		   select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Write out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary
				   information, disabling summary for this jeb and freeing
				   the collected information */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* keep a valid value in reserved_size at all times */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				jffs2_dbg(1, "%s(): Flushing write buffer\n",
					  __func__);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
			if (ret)
				return ret;
			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* The only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
		  __func__,
		  *len, jeb->offset + (c->sector_size - jeb->free_size));
	return 0;
}

/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@ofs: physical offset of the node; the low two bits carry its REF_* flags
 *	@len: length of this physical node
 *	@ic: inode cache the node belongs to, or NULL
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space.
 *
 *	Must be called with the alloc_sem held.
 */

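/*
 * A minimal sketch of how a write path pairs this with the allocator
 * (illustrative only; 'flash_ofs' stands for wherever the node was just
 * written, normally the current write point of c->nextblock, i.e.
 * jeb->offset + (c->sector_size - jeb->free_size)):
 *
 *	jffs2_reserve_space(c, write_size, &alloclen, ALLOC_NORMAL, sumsize);
 *	... write 'len' bytes of node data at 'flash_ofs' ...
 *	new = jffs2_add_physical_node_ref(c, flash_ofs | REF_NORMAL, len, ic);
 *	jffs2_complete_reservation(c);
 */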
struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
		  __func__, ofs & ~3, ofs & 3, len);
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3);
		if (c->nextblock)
			printk(KERN_WARNING "nextblock 0x%08x", c->nextblock->offset);
		else
			printk(KERN_WARNING "No nextblock");
		printk(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}

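/*
 * Release the allocation semaphore taken by jffs2_reserve_space() and poke
 * the GC thread, since the amount of free/dirty space may have changed.
 */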
void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	jffs2_dbg(1, "jffs2_complete_reservation()\n");
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->alloc_sem);
}

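/* Debug helper: linear scan to check whether 'obj' is on the list 'head'. */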
static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			jffs2_dbg(1, "%p is on list at %p\n", obj, head);
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if(unlikely(!ref)) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
			  __func__, ref_offset(ref));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
			  ref_offset(ref), freed_len);
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
			  ref_offset(ref), freed_len);
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Take care that the wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		jffs2_dbg(1, "Dirtying\n");
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset);
				addedsize = 0; /* To fool the refiling code later */
			} else {
				jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset);
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		jffs2_dbg(1, "Wasting\n");
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

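	/* The accounting is settled; now refile the eraseblock if its
	 * classification changed. In outline (matching the chain below):
	 * nextblock and gcblock stay where they are; a block with no used
	 * or unchecked space left becomes erasable; a block crossing the
	 * ISDIRTY or VERYDIRTY threshold moves to the dirty or very_dirty
	 * list respectively.
	 */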
	if (jeb == c->nextblock) {
		jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
			  jeb->offset);
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
				  jeb->offset);
			c->gcblock = NULL;
		} else {
			jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
				  jeb->offset);
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				jffs2_dbg(1, "...and adding to erase_pending_list\n");
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_garbage_collect_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				jffs2_dbg(1, "...and adding to erasable_list\n");
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		jffs2_dbg(1, "Done OK\n");
	} else if (jeb == c->gcblock) {
		jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
			  jeb->offset);
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to dirty_list\n");
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to very_dirty_list\n");
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
		(c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
		  ref_offset(ref));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
			  ref_offset(ref), je16_to_cpu(n.nodetype));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
			case RAWNODE_CLASS_XATTR_DATUM:
				jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
				break;
			case RAWNODE_CLASS_XATTR_REF:
				jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
				break;
#endif
			default:
				if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
					jffs2_del_ino_cache(c, ic);
				break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}

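/*
 * Called by the garbage-collector thread to decide whether there is work to
 * do. Wake-up reasons, in the order of the checks below: completed or pending
 * erases, unchecked nodes left over from mount, free space below the GC
 * trigger level with enough dirty space to reclaim, or too many very dirty
 * blocks.
 */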
int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list))
		return 1;

	if (c->unchecked_size) {
		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino);
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually being erased, it is no longer counted as
	 * dirty_space but it is still counted in c->nr_erasing_blocks, so we
	 * add it and subtract it again with c->nr_erasing_blocks * c->sector_size.
	 * Blocks on erasable_list are counted as dirty_size, but not in
	 * c->nr_erasing_blocks. This helps us to force gc and eventually pick
	 * a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
			(dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
		  c->dirty_size, nr_very_dirty, ret ? "yes" : "no");

	return ret;
}
819