/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 *	jffs2_reserve_space - request physical space to write nodes to flash
 *	@c: superblock info
 *	@minsize: Minimum acceptable size of allocation
 *	@len: Returned value of allocation length
 *	@prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *	@sumsize: Amount of summary space to reserve, or JFFS2_SUMMARY_NOSUM_SIZE
 *
 *	Requests a block of physical space on the flash. Returns zero for success
 *	and puts 'len' into the appropriate place, or returns -ENOSPC or another
 *	error if appropriate.
 *
 *	If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 *	allocation semaphore, to prevent more than one allocation from being
 *	active at any time. The semaphore is later released by
 *	jffs2_complete_reservation().
 *
 *	jffs2_reserve_space() may trigger garbage collection in order to make room
 *	for the requested allocation.
 */
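
/*
 * Example (an illustrative sketch, not code from this file): a typical
 * writer reserves space, writes its node within the returned length, then
 * releases the reservation. 'datalen' and the write step are hypothetical
 * placeholders.
 *
 *	uint32_t alloclen;
 *	int ret;
 *
 *	ret = jffs2_reserve_space(c, sizeof(struct jffs2_raw_inode) + datalen,
 *				  &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_NOSUM_SIZE);
 *	if (ret)
 *		return ret;
 *	... write at most 'alloclen' bytes of node data to flash ...
 *	jffs2_complete_reservation(c);
 */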

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	mutex_lock(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* Calculate the real dirty size.
			 * dirty_size contains blocks on the erase_pending_list;
			 * those blocks are counted in c->nr_erasing_blocks.
			 * Once a block is actually erased, it is no longer counted
			 * as dirty_space, but it is still counted in
			 * c->nr_erasing_blocks, so we add it in and then subtract
			 * it again via c->nr_erasing_blocks * c->sector_size.
			 * Blocks on the erasable_list are counted in dirty_size,
			 * but not in c->nr_erasing_blocks.
			 * This helps us force gc and eventually pick a clean block,
			 * to spread the load.
			 * We add unchecked_size here, as we hopefully will find some
			 * space to use. This will affect the sum only once, as gc
			 * first finishes checking of nodes.
			 */
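			/* Illustrative numbers (hypothetical, not from the source):
			 * with sector_size = 64KiB, dirty_size = 192KiB,
			 * erasing_size = 128KiB, nr_erasing_blocks = 2 and
			 * unchecked_size = 0, this yields
			 * dirty = 192K + 128K - 2*64K + 0 = 192KiB.
			 */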
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calculate the possibly available space. 'Possibly available'
			 * means that we don't know whether the unchecked size contains
			 * obsoleted nodes, which could give us some more usable space.
			 * This will affect the sum only once, as gc first finishes
			 * checking of nodes.
			 * Return -ENOSPC if the maximum possibly available space is
			 * less than or equal to blocksneeded * sector_size.
			 * This prevents endless gc looping on a filesystem which is
			 * nearly full, even if the check above passes.
			 */
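			/* For example (hypothetical numbers): with
			 * resv_blocks_write = 5 and sector_size = 64KiB, at least
			 * six blocks' worth (384KiB) of free + dirty + erasing +
			 * unchecked space must remain, or we return -ENOSPC here
			 * rather than loop in gc forever.
			 */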
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ( (avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			mutex_unlock(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);

			if (ret == -EAGAIN)
				jffs2_erase_pending_blocks(c, 1);
			else if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}

/* Classify nextblock (clean, dirty or verydirty) and force selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	if (c->nextblock == NULL) {
		D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n",
		  jeb->offset));
		return;
	}
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
		  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;
}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
				  ejeb->offset));
		}

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_pending_wbuf_list)) {
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				   c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				   list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;		/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
						/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
						"summary->size=%d , sumsize=%d\n",
						minsize, jeb->free_size,
						c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do we
		   have to write out the summary information now, close this jeb and
		   select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the
				   summary information; disable summary for this jeb
				   and free the collected information */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
			if (ret)
				return ret;
			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
		  *len, jeb->offset + (c->sector_size - jeb->free_size)));
	return 0;
}

/**
 *	jffs2_add_physical_node_ref - add a physical node reference to the list
 *	@c: superblock info
 *	@ofs: physical offset of the node, with the ref flags in the low bits
 *	@len: length of this physical node
 *	@ic: inode cache to associate the new node reference with (may be NULL)
 *
 *	Should only be used to report nodes for which space has been allocated
 *	by jffs2_reserve_space.
 *
 *	Must be called with the alloc_sem held.
 */
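
/*
 * Illustrative call (a sketch under assumptions, not code from this file):
 * after writing a node at physical address 'ofs', a caller would typically
 * report it like this; 'totlen' and 'ic' are hypothetical placeholders.
 *
 *	struct jffs2_raw_node_ref *new;
 *
 *	new = jffs2_add_physical_node_ref(c, ofs | REF_NORMAL, PAD(totlen), ic);
 *	if (IS_ERR(new))
 *		return PTR_ERR(new);
 */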

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
		  ofs & ~3, ofs & 3, len));
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3);
		if (c->nextblock)
			printk(KERN_WARNING "nextblock 0x%08x", c->nextblock->offset);
		else
			printk(KERN_WARNING "No nextblock");
		printk(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}

void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	mutex_unlock(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if(unlikely(!ref)) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Take care that wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		D1(printk("Dirtying\n"));
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk("Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
		(c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
			case RAWNODE_CLASS_XATTR_DATUM:
				jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
				break;
			case RAWNODE_CLASS_XATTR_REF:
				jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
				break;
#endif
			default:
				if (ic->nodes == (void *)ic && ic->nlink == 0)
					jffs2_del_ino_cache(c, ic);
				break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on the erase_pending_list;
	 * those blocks are counted in c->nr_erasing_blocks.
	 * Once a block is actually erased, it is no longer counted as
	 * dirty_space, but it is still counted in c->nr_erasing_blocks,
	 * so we add it in and then subtract it again via
	 * c->nr_erasing_blocks * c->sector_size.
	 * Blocks on the erasable_list are counted in dirty_size, but not
	 * in c->nr_erasing_blocks.
	 * This helps us force gc and eventually pick a clean block, to
	 * spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
			(dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, nr_very_dirty, ret?"yes":"no"));

	return ret;
}
765