xref: /openbmc/linux/drivers/mtd/ubi/wl.c (revision 565d76cb)
1 /*
2  * Copyright (c) International Business Machines Corp., 2006
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12  * the GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17  *
18  * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
19  */
20 
21 /*
22  * UBI wear-leveling sub-system.
23  *
24  * This sub-system is responsible for wear-leveling. It works in terms of
25  * physical eraseblocks and erase counters and knows nothing about logical
26  * eraseblocks, volumes, etc. From this sub-system's perspective all physical
27  * eraseblocks are of two types - used and free. Used physical eraseblocks are
28  * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
29  * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
30  *
 31  * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only an erase
 32  * counter header. The rest of the physical eraseblock contains only %0xFF bytes.
33  *
34  * When physical eraseblocks are returned to the WL sub-system by means of the
35  * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
36  * done asynchronously in context of the per-UBI device background thread,
37  * which is also managed by the WL sub-system.
38  *
39  * The wear-leveling is ensured by means of moving the contents of used
40  * physical eraseblocks with low erase counter to free physical eraseblocks
41  * with high erase counter.
42  *
43  * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
44  * an "optimal" physical eraseblock. For example, when it is known that the
45  * physical eraseblock will be "put" soon because it contains short-term data,
46  * the WL sub-system may pick a free physical eraseblock with low erase
47  * counter, and so forth.
48  *
49  * If the WL sub-system fails to erase a physical eraseblock, it marks it as
50  * bad.
51  *
52  * This sub-system is also responsible for scrubbing. If a bit-flip is detected
53  * in a physical eraseblock, it has to be moved. Technically this is the same
54  * as moving it for wear-leveling reasons.
55  *
 56  * As was said above, for this sub-system all physical eraseblocks are either
 57  * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
58  * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
59  * RB-trees, as well as (temporarily) in the @wl->pq queue.
60  *
61  * When the WL sub-system returns a physical eraseblock, the physical
62  * eraseblock is protected from being moved for some "time". For this reason,
63  * the physical eraseblock is not directly moved from the @wl->free tree to the
64  * @wl->used tree. There is a protection queue in between where this
65  * physical eraseblock is temporarily stored (@wl->pq).
66  *
67  * All this protection stuff is needed because:
68  *  o we don't want to move physical eraseblocks just after we have given them
69  *    to the user; instead, we first want to let users fill them up with data;
70  *
71  *  o there is a chance that the user will put the physical eraseblock very
72  *    soon, so it makes sense not to move it for some time, but wait; this is
73  *    especially important in case of "short term" physical eraseblocks.
74  *
 75  * Physical eraseblocks stay protected only for a limited time, and the "time"
 76  * is measured in erase cycles in this case. This is implemented with the help
 77  * of the protection queue. Eraseblocks are put to the tail of this queue when
 78  * they are returned by 'ubi_wl_get_peb()', and eraseblocks are removed from the
 79  * head of the queue on each erase operation (for any eraseblock). So the queue
 80  * length defines for how many (global) erase cycles PEBs stay protected.
81  *
82  * To put it differently, each physical eraseblock has 2 main states: free and
83  * used. The former state corresponds to the @wl->free tree. The latter state
 84  * is split into several sub-states:
85  * o the WL movement is allowed (@wl->used tree);
86  * o the WL movement is disallowed (@wl->erroneous) because the PEB is
87  *   erroneous - e.g., there was a read error;
88  * o the WL movement is temporarily prohibited (@wl->pq queue);
89  * o scrubbing is needed (@wl->scrub tree).
90  *
91  * Depending on the sub-state, wear-leveling entries of the used physical
92  * eraseblocks may be kept in one of those structures.
93  *
94  * Note, in this implementation, we keep a small in-RAM object for each physical
95  * eraseblock. This is surely not a scalable solution. But it appears to be good
 96  * enough for moderately large flashes, and it is simple. In the future, one may
97  * re-work this sub-system and make it more scalable.
98  *
99  * At the moment this sub-system does not utilize the sequence number, which
100  * was introduced relatively recently. But it would be wise to do this because
 101  * the sequence number of a logical eraseblock characterizes how old it is. For
 102  * example, when we move a PEB with a low erase counter and need to pick the
 103  * target PEB, we could pick a PEB with the highest EC if our PEB is "old" and a
 104  * target PEB with an average EC if our PEB is not very "old". This is room
 105  * for future re-work of the WL sub-system.
106  */
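
/*
 * Illustrative sketch (not part of the driver): a minimal user-space model of
 * the protection queue described above. PROT_QUEUE_LEN, 'head' and 'slot' are
 * hypothetical names; only the rotation rule - a freshly "gotten" PEB is
 * parked at the tail and one slot is retired per erase - mirrors @wl->pq:
 *
 *	#include <stdio.h>
 *
 *	#define PROT_QUEUE_LEN 10	// stand-in for UBI_PROT_QUEUE_LEN
 *
 *	int main(void)
 *	{
 *		int head = 0, erases = 0;
 *		// The tail is one slot behind the head, with wrap-around.
 *		int slot = (head - 1 + PROT_QUEUE_LEN) % PROT_QUEUE_LEN;
 *
 *		// Each erase retires the slot at the head and advances it.
 *		while (head != slot) {
 *			head = (head + 1) % PROT_QUEUE_LEN;
 *			erases++;
 *		}
 *		// Prints 9: the parked PEB survives PROT_QUEUE_LEN - 1
 *		// erases before the head reaches its slot.
 *		printf("protected for %d erase cycles\n", erases);
 *		return 0;
 *	}
 */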
107 
108 #include <linux/slab.h>
109 #include <linux/crc32.h>
110 #include <linux/freezer.h>
111 #include <linux/kthread.h>
112 #include "ubi.h"
113 
114 /* Number of physical eraseblocks reserved for wear-leveling purposes */
115 #define WL_RESERVED_PEBS 1
116 
117 /*
118  * Maximum difference between two erase counters. If this threshold is
119  * exceeded, the WL sub-system starts moving data from used physical
120  * eraseblocks with low erase counter to free physical eraseblocks with high
121  * erase counter.
122  */
123 #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
124 
125 /*
126  * When a physical eraseblock is moved, the WL sub-system has to pick the target
127  * physical eraseblock to move to. The simplest way would be just to pick the
128  * one with the highest erase counter. But in certain workloads this could lead
 129  * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
130  * situation when the picked physical eraseblock is constantly erased after the
131  * data is written to it. So, we have a constant which limits the highest erase
132  * counter of the free physical eraseblock to pick. Namely, the WL sub-system
133  * does not pick eraseblocks with erase counter greater than the lowest erase
134  * counter plus %WL_FREE_MAX_DIFF.
135  */
136 #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
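
/*
 * Worked example (illustrative): with CONFIG_MTD_UBI_WL_THRESHOLD at its
 * usual default of 4096, WL_FREE_MAX_DIFF is 8192. If the least worn free
 * PEB has an erase counter of 1000, 'find_wl_entry()' below will never
 * return a free PEB whose erase counter is 9192 or higher.
 */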
137 
138 /*
139  * Maximum number of consecutive background thread failures which is enough to
140  * switch to read-only mode.
141  */
142 #define WL_MAX_FAILURES 32
143 
144 /**
145  * struct ubi_work - UBI work description data structure.
146  * @list: a link in the list of pending works
147  * @func: worker function
148  * @e: physical eraseblock to erase
149  * @torture: if the physical eraseblock has to be tortured
150  *
151  * The @func pointer points to the worker function. If the @cancel argument is
152  * not zero, the worker has to free the resources and exit immediately. The
153  * worker has to return zero in case of success and a negative error code in
154  * case of failure.
155  */
156 struct ubi_work {
157 	struct list_head list;
158 	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
159 	/* The below fields are only relevant to erasure works */
160 	struct ubi_wl_entry *e;
161 	int torture;
162 };
163 
164 #ifdef CONFIG_MTD_UBI_DEBUG
165 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
166 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
167 				     struct rb_root *root);
168 static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e);
169 #else
170 #define paranoid_check_ec(ubi, pnum, ec) 0
171 #define paranoid_check_in_wl_tree(e, root)
172 #define paranoid_check_in_pq(ubi, e) 0
173 #endif
174 
175 /**
176  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
177  * @e: the wear-leveling entry to add
178  * @root: the root of the tree
179  *
180  * Note, we use (erase counter, physical eraseblock number) pairs as keys in
181  * the @ubi->used and @ubi->free RB-trees.
182  */
183 static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
184 {
185 	struct rb_node **p, *parent = NULL;
186 
187 	p = &root->rb_node;
188 	while (*p) {
189 		struct ubi_wl_entry *e1;
190 
191 		parent = *p;
192 		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
193 
194 		if (e->ec < e1->ec)
195 			p = &(*p)->rb_left;
196 		else if (e->ec > e1->ec)
197 			p = &(*p)->rb_right;
198 		else {
199 			ubi_assert(e->pnum != e1->pnum);
200 			if (e->pnum < e1->pnum)
201 				p = &(*p)->rb_left;
202 			else
203 				p = &(*p)->rb_right;
204 		}
205 	}
206 
207 	rb_link_node(&e->u.rb, parent, p);
208 	rb_insert_color(&e->u.rb, root);
209 }
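
/*
 * Illustrative sketch (hypothetical helper, not used by the driver): since
 * the trees are keyed by (erase counter, PEB number) pairs, rb_first()
 * always yields the least worn entry and rb_last() the most worn one:
 *
 *	static struct ubi_wl_entry *wl_min_ec(struct rb_root *root)
 *	{
 *		struct rb_node *first = rb_first(root);
 *
 *		return first ? rb_entry(first, struct ubi_wl_entry, u.rb)
 *			     : NULL;
 *	}
 */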
210 
211 /**
212  * do_work - do one pending work.
213  * @ubi: UBI device description object
214  *
215  * This function returns zero in case of success and a negative error code in
216  * case of failure.
217  */
218 static int do_work(struct ubi_device *ubi)
219 {
220 	int err;
221 	struct ubi_work *wrk;
222 
223 	cond_resched();
224 
225 	/*
226 	 * @ubi->work_sem is used to synchronize with the workers. Workers take
227 	 * it in read mode, so many of them may be doing works at a time. But
228 	 * the queue flush code has to be sure the whole queue of works is
 229 	 * done, and it takes @work_sem in write mode.
230 	 */
231 	down_read(&ubi->work_sem);
232 	spin_lock(&ubi->wl_lock);
233 	if (list_empty(&ubi->works)) {
234 		spin_unlock(&ubi->wl_lock);
235 		up_read(&ubi->work_sem);
236 		return 0;
237 	}
238 
239 	wrk = list_entry(ubi->works.next, struct ubi_work, list);
240 	list_del(&wrk->list);
241 	ubi->works_count -= 1;
242 	ubi_assert(ubi->works_count >= 0);
243 	spin_unlock(&ubi->wl_lock);
244 
245 	/*
246 	 * Call the worker function. Do not touch the work structure
247 	 * after this call as it will have been freed or reused by that
248 	 * time by the worker function.
249 	 */
250 	err = wrk->func(ubi, wrk, 0);
251 	if (err)
252 		ubi_err("work failed with error code %d", err);
253 	up_read(&ubi->work_sem);
254 
255 	return err;
256 }
257 
258 /**
259  * produce_free_peb - produce a free physical eraseblock.
260  * @ubi: UBI device description object
261  *
262  * This function tries to make a free PEB by means of synchronous execution of
 263  * pending works. This may be needed if, for example, the background thread is
264  * disabled. Returns zero in case of success and a negative error code in case
265  * of failure.
266  */
267 static int produce_free_peb(struct ubi_device *ubi)
268 {
269 	int err;
270 
271 	spin_lock(&ubi->wl_lock);
272 	while (!ubi->free.rb_node) {
273 		spin_unlock(&ubi->wl_lock);
274 
275 		dbg_wl("do one work synchronously");
276 		err = do_work(ubi);
277 		if (err)
278 			return err;
279 
280 		spin_lock(&ubi->wl_lock);
281 	}
282 	spin_unlock(&ubi->wl_lock);
283 
284 	return 0;
285 }
286 
287 /**
288  * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
289  * @e: the wear-leveling entry to check
290  * @root: the root of the tree
291  *
292  * This function returns non-zero if @e is in the @root RB-tree and zero if it
293  * is not.
294  */
295 static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
296 {
297 	struct rb_node *p;
298 
299 	p = root->rb_node;
300 	while (p) {
301 		struct ubi_wl_entry *e1;
302 
303 		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
304 
305 		if (e->pnum == e1->pnum) {
306 			ubi_assert(e == e1);
307 			return 1;
308 		}
309 
310 		if (e->ec < e1->ec)
311 			p = p->rb_left;
312 		else if (e->ec > e1->ec)
313 			p = p->rb_right;
314 		else {
315 			ubi_assert(e->pnum != e1->pnum);
316 			if (e->pnum < e1->pnum)
317 				p = p->rb_left;
318 			else
319 				p = p->rb_right;
320 		}
321 	}
322 
323 	return 0;
324 }
325 
326 /**
327  * prot_queue_add - add physical eraseblock to the protection queue.
328  * @ubi: UBI device description object
329  * @e: the physical eraseblock to add
330  *
331  * This function adds @e to the tail of the protection queue @ubi->pq, where
332  * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 333  * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock
 334  * has to be held.
335  */
336 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
337 {
338 	int pq_tail = ubi->pq_head - 1;
339 
340 	if (pq_tail < 0)
341 		pq_tail = UBI_PROT_QUEUE_LEN - 1;
342 	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
343 	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
344 	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
345 }
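
/*
 * Note (added for clarity): the tail computation above is plain ring
 * arithmetic, equivalent to
 *
 *	pq_tail = (ubi->pq_head - 1 + UBI_PROT_QUEUE_LEN) % UBI_PROT_QUEUE_LEN;
 *
 * so a newly protected PEB lands in the slot that 'serve_prot_queue()' will
 * reach last, roughly %UBI_PROT_QUEUE_LEN erase operations from now.
 */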
346 
347 /**
348  * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 349  * @root: the RB-tree to search
 350  * @max: highest allowed distance from the lowest erase counter in @root
 351  *
 352  * This function returns the wear-leveling entry with the highest erase counter
 353  * which is still lower than the lowest erase counter in @root plus @max.
354  */
355 static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
356 {
357 	struct rb_node *p;
358 	struct ubi_wl_entry *e;
359 
360 	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
361 	max += e->ec;
362 
363 	p = root->rb_node;
364 	while (p) {
365 		struct ubi_wl_entry *e1;
366 
367 		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
368 		if (e1->ec >= max)
369 			p = p->rb_left;
370 		else {
371 			p = p->rb_right;
372 			e = e1;
373 		}
374 	}
375 
376 	return e;
377 }
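
/*
 * Usage note (added for clarity): most callers below invoke
 * find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF), i.e. "the most worn free PEB
 * whose erase counter is still below the lowest free erase counter plus
 * WL_FREE_MAX_DIFF"; 'ubi_wl_get_peb()' additionally calls it with a medium
 * erase counter for %UBI_UNKNOWN data.
 */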
378 
379 /**
380  * ubi_wl_get_peb - get a physical eraseblock.
381  * @ubi: UBI device description object
382  * @dtype: type of data which will be stored in this physical eraseblock
383  *
 384  * This function returns a physical eraseblock number in case of success and a
385  * negative error code in case of failure. Might sleep.
386  */
387 int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
388 {
389 	int err, medium_ec;
390 	struct ubi_wl_entry *e, *first, *last;
391 
392 	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
393 		   dtype == UBI_UNKNOWN);
394 
395 retry:
396 	spin_lock(&ubi->wl_lock);
397 	if (!ubi->free.rb_node) {
398 		if (ubi->works_count == 0) {
399 			ubi_assert(list_empty(&ubi->works));
400 			ubi_err("no free eraseblocks");
401 			spin_unlock(&ubi->wl_lock);
402 			return -ENOSPC;
403 		}
404 		spin_unlock(&ubi->wl_lock);
405 
406 		err = produce_free_peb(ubi);
407 		if (err < 0)
408 			return err;
409 		goto retry;
410 	}
411 
412 	switch (dtype) {
413 	case UBI_LONGTERM:
414 		/*
415 		 * For long term data we pick a physical eraseblock with high
416 		 * erase counter. But the highest erase counter we can pick is
 417 		 * bounded by the lowest erase counter plus
418 		 * %WL_FREE_MAX_DIFF.
419 		 */
420 		e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
421 		break;
422 	case UBI_UNKNOWN:
423 		/*
424 		 * For unknown data we pick a physical eraseblock with medium
 425 		 * erase counter. But by no means may we pick a physical
 426 		 * eraseblock with an erase counter greater than or equal to
 427 		 * the lowest erase counter plus %WL_FREE_MAX_DIFF.
428 		 */
429 		first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
430 					u.rb);
431 		last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
432 
433 		if (last->ec - first->ec < WL_FREE_MAX_DIFF)
434 			e = rb_entry(ubi->free.rb_node,
435 					struct ubi_wl_entry, u.rb);
436 		else {
437 			medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
438 			e = find_wl_entry(&ubi->free, medium_ec);
439 		}
440 		break;
441 	case UBI_SHORTTERM:
442 		/*
443 		 * For short term data we pick a physical eraseblock with the
444 		 * lowest erase counter as we expect it will be erased soon.
445 		 */
446 		e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
447 		break;
448 	default:
449 		BUG();
450 	}
451 
452 	paranoid_check_in_wl_tree(e, &ubi->free);
453 
454 	/*
455 	 * Move the physical eraseblock to the protection queue where it will
456 	 * be protected from being moved for some time.
457 	 */
458 	rb_erase(&e->u.rb, &ubi->free);
459 	dbg_wl("PEB %d EC %d", e->pnum, e->ec);
460 	prot_queue_add(ubi, e);
461 	spin_unlock(&ubi->wl_lock);
462 
463 	err = ubi_dbg_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
464 				   ubi->peb_size - ubi->vid_hdr_aloffset);
465 	if (err) {
466 		ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
467 		return err;
468 	}
469 
470 	return e->pnum;
471 }
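
/*
 * Illustrative sketch (hypothetical caller, locking and error handling
 * elided): the life cycle of a PEB from this sub-system's point of view:
 *
 *	int pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
 *
 *	if (pnum < 0)
 *		return pnum;
 *	// ... the EBA sub-system writes the VID header and data here ...
 *	return ubi_wl_put_peb(ubi, pnum, 0);	// schedules erasure
 */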
472 
473 /**
474  * prot_queue_del - remove a physical eraseblock from the protection queue.
475  * @ubi: UBI device description object
476  * @pnum: the physical eraseblock to remove
477  *
478  * This function deletes PEB @pnum from the protection queue and returns zero
479  * in case of success and %-ENODEV if the PEB was not found.
480  */
481 static int prot_queue_del(struct ubi_device *ubi, int pnum)
482 {
483 	struct ubi_wl_entry *e;
484 
485 	e = ubi->lookuptbl[pnum];
486 	if (!e)
487 		return -ENODEV;
488 
489 	if (paranoid_check_in_pq(ubi, e))
490 		return -ENODEV;
491 
492 	list_del(&e->u.list);
493 	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
494 	return 0;
495 }
496 
497 /**
498  * sync_erase - synchronously erase a physical eraseblock.
499  * @ubi: UBI device description object
 500  * @e: the physical eraseblock to erase
501  * @torture: if the physical eraseblock has to be tortured
502  *
503  * This function returns zero in case of success and a negative error code in
504  * case of failure.
505  */
506 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
507 		      int torture)
508 {
509 	int err;
510 	struct ubi_ec_hdr *ec_hdr;
511 	unsigned long long ec = e->ec;
512 
513 	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
514 
515 	err = paranoid_check_ec(ubi, e->pnum, e->ec);
516 	if (err)
517 		return -EINVAL;
518 
519 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
520 	if (!ec_hdr)
521 		return -ENOMEM;
522 
523 	err = ubi_io_sync_erase(ubi, e->pnum, torture);
524 	if (err < 0)
525 		goto out_free;
526 
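	/*
	 * On success 'ubi_io_sync_erase()' returns the number of erase
	 * operations it performed (more than one if torturing was
	 * requested), hence the addition to the old erase counter.
	 */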
527 	ec += err;
528 	if (ec > UBI_MAX_ERASECOUNTER) {
529 		/*
530 		 * Erase counter overflow. Upgrade UBI and use 64-bit
531 		 * erase counters internally.
532 		 */
533 		ubi_err("erase counter overflow at PEB %d, EC %llu",
534 			e->pnum, ec);
535 		err = -EINVAL;
536 		goto out_free;
537 	}
538 
539 	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
540 
541 	ec_hdr->ec = cpu_to_be64(ec);
542 
543 	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
544 	if (err)
545 		goto out_free;
546 
547 	e->ec = ec;
548 	spin_lock(&ubi->wl_lock);
549 	if (e->ec > ubi->max_ec)
550 		ubi->max_ec = e->ec;
551 	spin_unlock(&ubi->wl_lock);
552 
553 out_free:
554 	kfree(ec_hdr);
555 	return err;
556 }
557 
558 /**
559  * serve_prot_queue - check if it is time to stop protecting PEBs.
560  * @ubi: UBI device description object
561  *
562  * This function is called after each erase operation and removes PEBs from the
563  * tail of the protection queue. These PEBs have been protected for long enough
564  * and should be moved to the used tree.
565  */
566 static void serve_prot_queue(struct ubi_device *ubi)
567 {
568 	struct ubi_wl_entry *e, *tmp;
569 	int count;
570 
571 	/*
 572 	 * There may be several protected physical eraseblocks to remove,
573 	 * process them all.
574 	 */
575 repeat:
576 	count = 0;
577 	spin_lock(&ubi->wl_lock);
578 	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
579 		dbg_wl("PEB %d EC %d protection over, move to used tree",
580 			e->pnum, e->ec);
581 
582 		list_del(&e->u.list);
583 		wl_tree_add(e, &ubi->used);
584 		if (count++ > 32) {
585 			/*
586 			 * Let's be nice and avoid holding the spinlock for
587 			 * too long.
588 			 */
589 			spin_unlock(&ubi->wl_lock);
590 			cond_resched();
591 			goto repeat;
592 		}
593 	}
594 
595 	ubi->pq_head += 1;
596 	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
597 		ubi->pq_head = 0;
598 	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
599 	spin_unlock(&ubi->wl_lock);
600 }
601 
602 /**
603  * schedule_ubi_work - schedule a work.
604  * @ubi: UBI device description object
605  * @wrk: the work to schedule
606  *
607  * This function adds a work defined by @wrk to the tail of the pending works
608  * list.
609  */
610 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
611 {
612 	spin_lock(&ubi->wl_lock);
613 	list_add_tail(&wrk->list, &ubi->works);
614 	ubi_assert(ubi->works_count >= 0);
615 	ubi->works_count += 1;
616 	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled())
617 		wake_up_process(ubi->bgt_thread);
618 	spin_unlock(&ubi->wl_lock);
619 }
620 
621 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
622 			int cancel);
623 
624 /**
625  * schedule_erase - schedule an erase work.
626  * @ubi: UBI device description object
627  * @e: the WL entry of the physical eraseblock to erase
628  * @torture: if the physical eraseblock has to be tortured
629  *
 630  * This function returns zero in case of success and %-ENOMEM in case of
631  * failure.
632  */
633 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
634 			  int torture)
635 {
636 	struct ubi_work *wl_wrk;
637 
638 	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
639 	       e->pnum, e->ec, torture);
640 
641 	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
642 	if (!wl_wrk)
643 		return -ENOMEM;
644 
645 	wl_wrk->func = &erase_worker;
646 	wl_wrk->e = e;
647 	wl_wrk->torture = torture;
648 
649 	schedule_ubi_work(ubi, wl_wrk);
650 	return 0;
651 }
652 
653 /**
654  * wear_leveling_worker - wear-leveling worker function.
655  * @ubi: UBI device description object
656  * @wrk: the work object
657  * @cancel: non-zero if the worker has to free memory and exit
658  *
 659  * This function copies a less worn-out physical eraseblock to a more worn-out
660  * one. Returns zero in case of success and a negative error code in case of
661  * failure.
662  */
663 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
664 				int cancel)
665 {
666 	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
667 	int vol_id = -1, uninitialized_var(lnum);
668 	struct ubi_wl_entry *e1, *e2;
669 	struct ubi_vid_hdr *vid_hdr;
670 
671 	kfree(wrk);
672 	if (cancel)
673 		return 0;
674 
675 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
676 	if (!vid_hdr)
677 		return -ENOMEM;
678 
679 	mutex_lock(&ubi->move_mutex);
680 	spin_lock(&ubi->wl_lock);
681 	ubi_assert(!ubi->move_from && !ubi->move_to);
682 	ubi_assert(!ubi->move_to_put);
683 
684 	if (!ubi->free.rb_node ||
685 	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
686 		/*
687 		 * No free physical eraseblocks? Well, they must be waiting in
688 		 * the queue to be erased. Cancel movement - it will be
689 		 * triggered again when a free physical eraseblock appears.
690 		 *
691 		 * No used physical eraseblocks? They must be temporarily
692 		 * protected from being moved. They will be moved to the
693 		 * @ubi->used tree later and the wear-leveling will be
694 		 * triggered again.
695 		 */
696 		dbg_wl("cancel WL, a list is empty: free %d, used %d",
697 		       !ubi->free.rb_node, !ubi->used.rb_node);
698 		goto out_cancel;
699 	}
700 
701 	if (!ubi->scrub.rb_node) {
702 		/*
703 		 * Now pick the least worn-out used physical eraseblock and a
704 		 * highly worn-out free physical eraseblock. If the erase
 705 		 * counters differ enough, start wear-leveling.
706 		 */
707 		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
708 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
709 
 710 		if (e2->ec - e1->ec < UBI_WL_THRESHOLD) {
711 			dbg_wl("no WL needed: min used EC %d, max free EC %d",
712 			       e1->ec, e2->ec);
713 			goto out_cancel;
714 		}
715 		paranoid_check_in_wl_tree(e1, &ubi->used);
716 		rb_erase(&e1->u.rb, &ubi->used);
717 		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
718 		       e1->pnum, e1->ec, e2->pnum, e2->ec);
719 	} else {
720 		/* Perform scrubbing */
721 		scrubbing = 1;
722 		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
723 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
724 		paranoid_check_in_wl_tree(e1, &ubi->scrub);
725 		rb_erase(&e1->u.rb, &ubi->scrub);
726 		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
727 	}
728 
729 	paranoid_check_in_wl_tree(e2, &ubi->free);
730 	rb_erase(&e2->u.rb, &ubi->free);
731 	ubi->move_from = e1;
732 	ubi->move_to = e2;
733 	spin_unlock(&ubi->wl_lock);
734 
735 	/*
736 	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
737 	 * We so far do not know which logical eraseblock our physical
738 	 * eraseblock (@e1) belongs to. We have to read the volume identifier
739 	 * header first.
740 	 *
741 	 * Note, we are protected from this PEB being unmapped and erased. The
 742 	 * 'ubi_wl_put_peb()' would wait for the move to finish if the PEB
 743 	 * being moved were unmapped.
744 	 */
745 
746 	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
747 	if (err && err != UBI_IO_BITFLIPS) {
748 		if (err == UBI_IO_FF) {
749 			/*
 750 			 * We are trying to move a PEB without a VID header. UBI
 751 			 * always writes VID headers shortly after a PEB is
 752 			 * given out, so most probably the user simply has not
 753 			 * had a chance to write one yet because it was
 754 			 * preempted. So add this PEB to the protection queue
 755 			 * for now: presumably more data will be written to it
 756 			 * (including the missing VID header), and then we will
 757 			 * move it.
758 			 */
759 			dbg_wl("PEB %d has no VID header", e1->pnum);
760 			protect = 1;
761 			goto out_not_moved;
762 		} else if (err == UBI_IO_FF_BITFLIPS) {
763 			/*
764 			 * The same situation as %UBI_IO_FF, but bit-flips were
765 			 * detected. It is better to schedule this PEB for
766 			 * scrubbing.
767 			 */
768 			dbg_wl("PEB %d has no VID header but has bit-flips",
769 			       e1->pnum);
770 			scrubbing = 1;
771 			goto out_not_moved;
772 		}
773 
774 		ubi_err("error %d while reading VID header from PEB %d",
775 			err, e1->pnum);
776 		goto out_error;
777 	}
778 
779 	vol_id = be32_to_cpu(vid_hdr->vol_id);
780 	lnum = be32_to_cpu(vid_hdr->lnum);
781 
782 	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
783 	if (err) {
784 		if (err == MOVE_CANCEL_RACE) {
785 			/*
786 			 * The LEB has not been moved because the volume is
787 			 * being deleted or the PEB has been put meanwhile. We
788 			 * should prevent this PEB from being selected for
789 			 * wear-leveling movement again, so put it to the
790 			 * protection queue.
791 			 */
792 			protect = 1;
793 			goto out_not_moved;
794 		}
795 
796 		if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
797 		    err == MOVE_TARGET_RD_ERR) {
798 			/*
799 			 * Target PEB had bit-flips or write error - torture it.
800 			 */
801 			torture = 1;
802 			goto out_not_moved;
803 		}
804 
805 		if (err == MOVE_SOURCE_RD_ERR) {
806 			/*
807 			 * An error happened while reading the source PEB. Do
808 			 * not switch to R/O mode in this case, and give the
809 			 * upper layers a possibility to recover from this,
810 			 * e.g. by unmapping corresponding LEB. Instead, just
811 			 * put this PEB to the @ubi->erroneous list to prevent
812 			 * UBI from trying to move it over and over again.
813 			 */
814 			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
815 				ubi_err("too many erroneous eraseblocks (%d)",
816 					ubi->erroneous_peb_count);
817 				goto out_error;
818 			}
819 			erroneous = 1;
820 			goto out_not_moved;
821 		}
822 
823 		if (err < 0)
824 			goto out_error;
825 
826 		ubi_assert(0);
827 	}
828 
829 	/* The PEB has been successfully moved */
830 	if (scrubbing)
831 		ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
832 			e1->pnum, vol_id, lnum, e2->pnum);
833 	ubi_free_vid_hdr(ubi, vid_hdr);
834 
835 	spin_lock(&ubi->wl_lock);
836 	if (!ubi->move_to_put) {
837 		wl_tree_add(e2, &ubi->used);
838 		e2 = NULL;
839 	}
840 	ubi->move_from = ubi->move_to = NULL;
841 	ubi->move_to_put = ubi->wl_scheduled = 0;
842 	spin_unlock(&ubi->wl_lock);
843 
844 	err = schedule_erase(ubi, e1, 0);
845 	if (err) {
846 		kmem_cache_free(ubi_wl_entry_slab, e1);
847 		if (e2)
848 			kmem_cache_free(ubi_wl_entry_slab, e2);
849 		goto out_ro;
850 	}
851 
852 	if (e2) {
853 		/*
854 		 * Well, the target PEB was put meanwhile, schedule it for
855 		 * erasure.
856 		 */
857 		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
858 		       e2->pnum, vol_id, lnum);
859 		err = schedule_erase(ubi, e2, 0);
860 		if (err) {
861 			kmem_cache_free(ubi_wl_entry_slab, e2);
862 			goto out_ro;
863 		}
864 	}
865 
866 	dbg_wl("done");
867 	mutex_unlock(&ubi->move_mutex);
868 	return 0;
869 
870 	/*
 871 	 * For some reason the LEB was not moved: it might be an error, or it
 872 	 * might be something else. @e1 was not changed, so return it back. @e2
 873 	 * might have been changed, so schedule it for erasure.
874 	 */
875 out_not_moved:
876 	if (vol_id != -1)
877 		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
878 		       e1->pnum, vol_id, lnum, e2->pnum, err);
879 	else
880 		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
881 		       e1->pnum, e2->pnum, err);
882 	spin_lock(&ubi->wl_lock);
883 	if (protect)
884 		prot_queue_add(ubi, e1);
885 	else if (erroneous) {
886 		wl_tree_add(e1, &ubi->erroneous);
887 		ubi->erroneous_peb_count += 1;
888 	} else if (scrubbing)
889 		wl_tree_add(e1, &ubi->scrub);
890 	else
891 		wl_tree_add(e1, &ubi->used);
892 	ubi_assert(!ubi->move_to_put);
893 	ubi->move_from = ubi->move_to = NULL;
894 	ubi->wl_scheduled = 0;
895 	spin_unlock(&ubi->wl_lock);
896 
897 	ubi_free_vid_hdr(ubi, vid_hdr);
898 	err = schedule_erase(ubi, e2, torture);
899 	if (err) {
900 		kmem_cache_free(ubi_wl_entry_slab, e2);
901 		goto out_ro;
902 	}
903 	mutex_unlock(&ubi->move_mutex);
904 	return 0;
905 
906 out_error:
 907 	if (vol_id != -1)
 908 		ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
 909 			err, e1->pnum, vol_id, lnum, e2->pnum);
 910 	else
 911 		ubi_err("error %d while moving PEB %d to PEB %d",
 912 			err, e1->pnum, e2->pnum);
913 	spin_lock(&ubi->wl_lock);
914 	ubi->move_from = ubi->move_to = NULL;
915 	ubi->move_to_put = ubi->wl_scheduled = 0;
916 	spin_unlock(&ubi->wl_lock);
917 
918 	ubi_free_vid_hdr(ubi, vid_hdr);
919 	kmem_cache_free(ubi_wl_entry_slab, e1);
920 	kmem_cache_free(ubi_wl_entry_slab, e2);
921 
922 out_ro:
923 	ubi_ro_mode(ubi);
924 	mutex_unlock(&ubi->move_mutex);
925 	ubi_assert(err != 0);
926 	return err < 0 ? err : -EIO;
927 
928 out_cancel:
929 	ubi->wl_scheduled = 0;
930 	spin_unlock(&ubi->wl_lock);
931 	mutex_unlock(&ubi->move_mutex);
932 	ubi_free_vid_hdr(ubi, vid_hdr);
933 	return 0;
934 }
935 
936 /**
937  * ensure_wear_leveling - schedule wear-leveling if it is needed.
938  * @ubi: UBI device description object
939  *
940  * This function checks if it is time to start wear-leveling and schedules it
941  * if yes. This function returns zero in case of success and a negative error
942  * code in case of failure.
943  */
944 static int ensure_wear_leveling(struct ubi_device *ubi)
945 {
946 	int err = 0;
947 	struct ubi_wl_entry *e1;
948 	struct ubi_wl_entry *e2;
949 	struct ubi_work *wrk;
950 
951 	spin_lock(&ubi->wl_lock);
952 	if (ubi->wl_scheduled)
953 		/* Wear-leveling is already in the work queue */
954 		goto out_unlock;
955 
956 	/*
 957 	 * If the @ubi->scrub tree is not empty, scrubbing is needed, and the
 958 	 * WL worker has to be scheduled anyway.
959 	 */
960 	if (!ubi->scrub.rb_node) {
961 		if (!ubi->used.rb_node || !ubi->free.rb_node)
962 			/* No physical eraseblocks - no deal */
963 			goto out_unlock;
964 
965 		/*
966 		 * We schedule wear-leveling only if the difference between the
967 		 * lowest erase counter of used physical eraseblocks and a high
 968 		 * erase counter of free physical eraseblocks is at least
969 		 * %UBI_WL_THRESHOLD.
970 		 */
971 		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
972 		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
973 
 974 		if (e2->ec - e1->ec < UBI_WL_THRESHOLD)
975 			goto out_unlock;
976 		dbg_wl("schedule wear-leveling");
977 	} else
978 		dbg_wl("schedule scrubbing");
979 
980 	ubi->wl_scheduled = 1;
981 	spin_unlock(&ubi->wl_lock);
982 
983 	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
984 	if (!wrk) {
985 		err = -ENOMEM;
986 		goto out_cancel;
987 	}
988 
989 	wrk->func = &wear_leveling_worker;
990 	schedule_ubi_work(ubi, wrk);
991 	return err;
992 
993 out_cancel:
994 	spin_lock(&ubi->wl_lock);
995 	ubi->wl_scheduled = 0;
996 out_unlock:
997 	spin_unlock(&ubi->wl_lock);
998 	return err;
999 }
1000 
1001 /**
1002  * erase_worker - physical eraseblock erase worker function.
1003  * @ubi: UBI device description object
1004  * @wl_wrk: the work object
1005  * @cancel: non-zero if the worker has to free memory and exit
1006  *
 1007  * This function erases a physical eraseblock and performs torture testing if
 1008  * needed. It also takes care of marking the physical eraseblock bad if
1009  * needed. Returns zero in case of success and a negative error code in case of
1010  * failure.
1011  */
1012 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1013 			int cancel)
1014 {
1015 	struct ubi_wl_entry *e = wl_wrk->e;
1016 	int pnum = e->pnum, err, need;
1017 
1018 	if (cancel) {
1019 		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1020 		kfree(wl_wrk);
1021 		kmem_cache_free(ubi_wl_entry_slab, e);
1022 		return 0;
1023 	}
1024 
1025 	dbg_wl("erase PEB %d EC %d", pnum, e->ec);
1026 
1027 	err = sync_erase(ubi, e, wl_wrk->torture);
1028 	if (!err) {
1029 		/* Fine, we've erased it successfully */
1030 		kfree(wl_wrk);
1031 
1032 		spin_lock(&ubi->wl_lock);
1033 		wl_tree_add(e, &ubi->free);
1034 		spin_unlock(&ubi->wl_lock);
1035 
1036 		/*
 1037 		 * One more erase operation has happened, take care of the
1038 		 * protected physical eraseblocks.
1039 		 */
1040 		serve_prot_queue(ubi);
1041 
 1042 		/* And take care of wear-leveling */
1043 		err = ensure_wear_leveling(ubi);
1044 		return err;
1045 	}
1046 
1047 	ubi_err("failed to erase PEB %d, error %d", pnum, err);
1048 	kfree(wl_wrk);
1049 	kmem_cache_free(ubi_wl_entry_slab, e);
1050 
1051 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1052 	    err == -EBUSY) {
1053 		int err1;
1054 
 1055 		/* Re-schedule the PEB for erasure */
1056 		err1 = schedule_erase(ubi, e, 0);
1057 		if (err1) {
1058 			err = err1;
1059 			goto out_ro;
1060 		}
1061 		return err;
1062 	} else if (err != -EIO) {
1063 		/*
1064 		 * If this is not %-EIO, we have no idea what to do. Scheduling
1065 		 * this physical eraseblock for erasure again would cause
 1066 		 * errors again and again. Well, let's switch to R/O mode.
1067 		 */
1068 		goto out_ro;
1069 	}
1070 
1071 	/* It is %-EIO, the PEB went bad */
1072 
1073 	if (!ubi->bad_allowed) {
1074 		ubi_err("bad physical eraseblock %d detected", pnum);
1075 		goto out_ro;
1076 	}
1077 
1078 	spin_lock(&ubi->volumes_lock);
1079 	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
1080 	if (need > 0) {
1081 		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
1082 		ubi->avail_pebs -= need;
1083 		ubi->rsvd_pebs += need;
1084 		ubi->beb_rsvd_pebs += need;
1085 		if (need > 0)
1086 			ubi_msg("reserve more %d PEBs", need);
1087 	}
1088 
1089 	if (ubi->beb_rsvd_pebs == 0) {
1090 		spin_unlock(&ubi->volumes_lock);
1091 		ubi_err("no reserved physical eraseblocks");
1092 		goto out_ro;
1093 	}
1094 	spin_unlock(&ubi->volumes_lock);
1095 
1096 	ubi_msg("mark PEB %d as bad", pnum);
1097 	err = ubi_io_mark_bad(ubi, pnum);
1098 	if (err)
1099 		goto out_ro;
1100 
1101 	spin_lock(&ubi->volumes_lock);
1102 	ubi->beb_rsvd_pebs -= 1;
1103 	ubi->bad_peb_count += 1;
1104 	ubi->good_peb_count -= 1;
1105 	ubi_calculate_reserved(ubi);
1106 	if (ubi->beb_rsvd_pebs)
1107 		ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
1108 	else
1109 		ubi_warn("last PEB from the reserved pool was used");
1110 	spin_unlock(&ubi->volumes_lock);
1111 
1112 	return err;
1113 
1114 out_ro:
1115 	ubi_ro_mode(ubi);
1116 	return err;
1117 }
1118 
1119 /**
1120  * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1121  * @ubi: UBI device description object
1122  * @pnum: physical eraseblock to return
1123  * @torture: if this physical eraseblock has to be tortured
1124  *
1125  * This function is called to return physical eraseblock @pnum to the pool of
1126  * free physical eraseblocks. The @torture flag has to be set if an I/O error
1127  * occurred to this @pnum and it has to be tested. This function returns zero
1128  * in case of success, and a negative error code in case of failure.
1129  */
1130 int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1131 {
1132 	int err;
1133 	struct ubi_wl_entry *e;
1134 
1135 	dbg_wl("PEB %d", pnum);
1136 	ubi_assert(pnum >= 0);
1137 	ubi_assert(pnum < ubi->peb_count);
1138 
1139 retry:
1140 	spin_lock(&ubi->wl_lock);
1141 	e = ubi->lookuptbl[pnum];
1142 	if (e == ubi->move_from) {
1143 		/*
1144 		 * User is putting the physical eraseblock which was selected to
1145 		 * be moved. It will be scheduled for erasure in the
1146 		 * wear-leveling worker.
1147 		 */
1148 		dbg_wl("PEB %d is being moved, wait", pnum);
1149 		spin_unlock(&ubi->wl_lock);
1150 
1151 		/* Wait for the WL worker by taking the @ubi->move_mutex */
1152 		mutex_lock(&ubi->move_mutex);
1153 		mutex_unlock(&ubi->move_mutex);
1154 		goto retry;
1155 	} else if (e == ubi->move_to) {
1156 		/*
1157 		 * User is putting the physical eraseblock which was selected
 1158 		 * as the target of the data move. This may happen if the EBA
 1159 		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
 1160 		 * while the WL sub-system has not yet put the PEB to the "used"
 1161 		 * tree, but is about to do this. So we just set a flag which
1162 		 * will tell the WL worker that the PEB is not needed anymore
1163 		 * and should be scheduled for erasure.
1164 		 */
1165 		dbg_wl("PEB %d is the target of data moving", pnum);
1166 		ubi_assert(!ubi->move_to_put);
1167 		ubi->move_to_put = 1;
1168 		spin_unlock(&ubi->wl_lock);
1169 		return 0;
1170 	} else {
1171 		if (in_wl_tree(e, &ubi->used)) {
1172 			paranoid_check_in_wl_tree(e, &ubi->used);
1173 			rb_erase(&e->u.rb, &ubi->used);
1174 		} else if (in_wl_tree(e, &ubi->scrub)) {
1175 			paranoid_check_in_wl_tree(e, &ubi->scrub);
1176 			rb_erase(&e->u.rb, &ubi->scrub);
1177 		} else if (in_wl_tree(e, &ubi->erroneous)) {
1178 			paranoid_check_in_wl_tree(e, &ubi->erroneous);
1179 			rb_erase(&e->u.rb, &ubi->erroneous);
1180 			ubi->erroneous_peb_count -= 1;
1181 			ubi_assert(ubi->erroneous_peb_count >= 0);
1182 			/* Erroneous PEBs should be tortured */
1183 			torture = 1;
1184 		} else {
1185 			err = prot_queue_del(ubi, e->pnum);
1186 			if (err) {
1187 				ubi_err("PEB %d not found", pnum);
1188 				ubi_ro_mode(ubi);
1189 				spin_unlock(&ubi->wl_lock);
1190 				return err;
1191 			}
1192 		}
1193 	}
1194 	spin_unlock(&ubi->wl_lock);
1195 
1196 	err = schedule_erase(ubi, e, torture);
1197 	if (err) {
1198 		spin_lock(&ubi->wl_lock);
1199 		wl_tree_add(e, &ubi->used);
1200 		spin_unlock(&ubi->wl_lock);
1201 	}
1202 
1203 	return err;
1204 }
1205 
1206 /**
1207  * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1208  * @ubi: UBI device description object
1209  * @pnum: the physical eraseblock to schedule
1210  *
1211  * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1212  * needs scrubbing. This function schedules a physical eraseblock for
1213  * scrubbing which is done in background. This function returns zero in case of
1214  * success and a negative error code in case of failure.
1215  */
1216 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1217 {
1218 	struct ubi_wl_entry *e;
1219 
1220 	dbg_msg("schedule PEB %d for scrubbing", pnum);
1221 
1222 retry:
1223 	spin_lock(&ubi->wl_lock);
1224 	e = ubi->lookuptbl[pnum];
1225 	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1226 				   in_wl_tree(e, &ubi->erroneous)) {
1227 		spin_unlock(&ubi->wl_lock);
1228 		return 0;
1229 	}
1230 
1231 	if (e == ubi->move_to) {
1232 		/*
1233 		 * This physical eraseblock was used to move data to. The data
 1234 		 * was moved but the PEB was not yet inserted into the proper
1235 		 * tree. We should just wait a little and let the WL worker
1236 		 * proceed.
1237 		 */
1238 		spin_unlock(&ubi->wl_lock);
1239 		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1240 		yield();
1241 		goto retry;
1242 	}
1243 
1244 	if (in_wl_tree(e, &ubi->used)) {
1245 		paranoid_check_in_wl_tree(e, &ubi->used);
1246 		rb_erase(&e->u.rb, &ubi->used);
1247 	} else {
1248 		int err;
1249 
1250 		err = prot_queue_del(ubi, e->pnum);
1251 		if (err) {
1252 			ubi_err("PEB %d not found", pnum);
1253 			ubi_ro_mode(ubi);
1254 			spin_unlock(&ubi->wl_lock);
1255 			return err;
1256 		}
1257 	}
1258 
1259 	wl_tree_add(e, &ubi->scrub);
1260 	spin_unlock(&ubi->wl_lock);
1261 
1262 	/*
1263 	 * Technically scrubbing is the same as wear-leveling, so it is done
1264 	 * by the WL worker.
1265 	 */
1266 	return ensure_wear_leveling(ubi);
1267 }
1268 
1269 /**
1270  * ubi_wl_flush - flush all pending works.
1271  * @ubi: UBI device description object
1272  *
1273  * This function returns zero in case of success and a negative error code in
1274  * case of failure.
1275  */
1276 int ubi_wl_flush(struct ubi_device *ubi)
1277 {
1278 	int err;
1279 
1280 	/*
 1281 	 * Synchronously execute pending works while the works queue is not
 1282 	 * empty.
1283 	 */
1284 	dbg_wl("flush (%d pending works)", ubi->works_count);
1285 	while (ubi->works_count) {
1286 		err = do_work(ubi);
1287 		if (err)
1288 			return err;
1289 	}
1290 
1291 	/*
1292 	 * Make sure all the works which have been done in parallel are
1293 	 * finished.
1294 	 */
1295 	down_write(&ubi->work_sem);
1296 	up_write(&ubi->work_sem);
1297 
1298 	/*
 1299 	 * And in case the last work was the WL worker and it canceled the LEB
 1300 	 * movement, flush again.
1301 	 */
1302 	while (ubi->works_count) {
1303 		dbg_wl("flush more (%d pending works)", ubi->works_count);
1304 		err = do_work(ubi);
1305 		if (err)
1306 			return err;
1307 	}
1308 
1309 	return 0;
1310 }
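
/*
 * Note (added for clarity): taking @work_sem in write mode above cannot
 * succeed until every worker that grabbed it in read mode (see 'do_work()')
 * has dropped it, so the empty down_write()/up_write() pair acts as a
 * "wait for all in-flight works" barrier.
 */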
1311 
1312 /**
1313  * tree_destroy - destroy an RB-tree.
1314  * @root: the root of the tree to destroy
1315  */
1316 static void tree_destroy(struct rb_root *root)
1317 {
1318 	struct rb_node *rb;
1319 	struct ubi_wl_entry *e;
1320 
1321 	rb = root->rb_node;
1322 	while (rb) {
1323 		if (rb->rb_left)
1324 			rb = rb->rb_left;
1325 		else if (rb->rb_right)
1326 			rb = rb->rb_right;
1327 		else {
1328 			e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1329 
1330 			rb = rb_parent(rb);
1331 			if (rb) {
1332 				if (rb->rb_left == &e->u.rb)
1333 					rb->rb_left = NULL;
1334 				else
1335 					rb->rb_right = NULL;
1336 			}
1337 
1338 			kmem_cache_free(ubi_wl_entry_slab, e);
1339 		}
1340 	}
1341 }
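
/*
 * Note (added for clarity): 'tree_destroy()' frees the tree iteratively by
 * always descending to a leaf, freeing it, and detaching it from its parent.
 * This avoids recursion and skips the rb_erase() rebalancing work, which is
 * pointless during teardown.
 */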
1342 
1343 /**
1344  * ubi_thread - UBI background thread.
1345  * @u: the UBI device description object pointer
1346  */
1347 int ubi_thread(void *u)
1348 {
1349 	int failures = 0;
1350 	struct ubi_device *ubi = u;
1351 
1352 	ubi_msg("background thread \"%s\" started, PID %d",
1353 		ubi->bgt_name, task_pid_nr(current));
1354 
1355 	set_freezable();
1356 	for (;;) {
1357 		int err;
1358 
1359 		if (kthread_should_stop())
1360 			break;
1361 
1362 		if (try_to_freeze())
1363 			continue;
1364 
1365 		spin_lock(&ubi->wl_lock);
1366 		if (list_empty(&ubi->works) || ubi->ro_mode ||
1367 		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled()) {
1368 			set_current_state(TASK_INTERRUPTIBLE);
1369 			spin_unlock(&ubi->wl_lock);
1370 			schedule();
1371 			continue;
1372 		}
1373 		spin_unlock(&ubi->wl_lock);
1374 
1375 		err = do_work(ubi);
1376 		if (err) {
1377 			ubi_err("%s: work failed with error code %d",
1378 				ubi->bgt_name, err);
1379 			if (failures++ > WL_MAX_FAILURES) {
1380 				/*
1381 				 * Too many failures, disable the thread and
1382 				 * switch to read-only mode.
1383 				 */
1384 				ubi_msg("%s: %d consecutive failures",
1385 					ubi->bgt_name, WL_MAX_FAILURES);
1386 				ubi_ro_mode(ubi);
1387 				ubi->thread_enabled = 0;
1388 				continue;
1389 			}
1390 		} else
1391 			failures = 0;
1392 
1393 		cond_resched();
1394 	}
1395 
1396 	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1397 	return 0;
1398 }
1399 
1400 /**
1401  * cancel_pending - cancel all pending works.
1402  * @ubi: UBI device description object
1403  */
1404 static void cancel_pending(struct ubi_device *ubi)
1405 {
1406 	while (!list_empty(&ubi->works)) {
1407 		struct ubi_work *wrk;
1408 
1409 		wrk = list_entry(ubi->works.next, struct ubi_work, list);
1410 		list_del(&wrk->list);
1411 		wrk->func(ubi, wrk, 1);
1412 		ubi->works_count -= 1;
1413 		ubi_assert(ubi->works_count >= 0);
1414 	}
1415 }
1416 
1417 /**
1418  * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
1419  * @ubi: UBI device description object
1420  * @si: scanning information
1421  *
1422  * This function returns zero in case of success, and a negative error code in
1423  * case of failure.
1424  */
1425 int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1426 {
1427 	int err, i;
1428 	struct rb_node *rb1, *rb2;
1429 	struct ubi_scan_volume *sv;
1430 	struct ubi_scan_leb *seb, *tmp;
1431 	struct ubi_wl_entry *e;
1432 
1433 	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1434 	spin_lock_init(&ubi->wl_lock);
1435 	mutex_init(&ubi->move_mutex);
1436 	init_rwsem(&ubi->work_sem);
1437 	ubi->max_ec = si->max_ec;
1438 	INIT_LIST_HEAD(&ubi->works);
1439 
1440 	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1441 
1442 	err = -ENOMEM;
1443 	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1444 	if (!ubi->lookuptbl)
1445 		return err;
1446 
1447 	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1448 		INIT_LIST_HEAD(&ubi->pq[i]);
1449 	ubi->pq_head = 0;
1450 
1451 	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
1452 		cond_resched();
1453 
1454 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1455 		if (!e)
1456 			goto out_free;
1457 
1458 		e->pnum = seb->pnum;
1459 		e->ec = seb->ec;
1460 		ubi->lookuptbl[e->pnum] = e;
1461 		if (schedule_erase(ubi, e, 0)) {
1462 			kmem_cache_free(ubi_wl_entry_slab, e);
1463 			goto out_free;
1464 		}
1465 	}
1466 
1467 	list_for_each_entry(seb, &si->free, u.list) {
1468 		cond_resched();
1469 
1470 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1471 		if (!e)
1472 			goto out_free;
1473 
1474 		e->pnum = seb->pnum;
1475 		e->ec = seb->ec;
1476 		ubi_assert(e->ec >= 0);
1477 		wl_tree_add(e, &ubi->free);
1478 		ubi->lookuptbl[e->pnum] = e;
1479 	}
1480 
1481 	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
1482 		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
1483 			cond_resched();
1484 
1485 			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1486 			if (!e)
1487 				goto out_free;
1488 
1489 			e->pnum = seb->pnum;
1490 			e->ec = seb->ec;
1491 			ubi->lookuptbl[e->pnum] = e;
1492 			if (!seb->scrub) {
1493 				dbg_wl("add PEB %d EC %d to the used tree",
1494 				       e->pnum, e->ec);
1495 				wl_tree_add(e, &ubi->used);
1496 			} else {
1497 				dbg_wl("add PEB %d EC %d to the scrub tree",
1498 				       e->pnum, e->ec);
1499 				wl_tree_add(e, &ubi->scrub);
1500 			}
1501 		}
1502 	}
1503 
1504 	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
1505 		ubi_err("no enough physical eraseblocks (%d, need %d)",
1506 			ubi->avail_pebs, WL_RESERVED_PEBS);
1507 		if (ubi->corr_peb_count)
1508 			ubi_err("%d PEBs are corrupted and not used",
1509 				ubi->corr_peb_count);
1510 		goto out_free;
1511 	}
1512 	ubi->avail_pebs -= WL_RESERVED_PEBS;
1513 	ubi->rsvd_pebs += WL_RESERVED_PEBS;
1514 
1515 	/* Schedule wear-leveling if needed */
1516 	err = ensure_wear_leveling(ubi);
1517 	if (err)
1518 		goto out_free;
1519 
1520 	return 0;
1521 
1522 out_free:
1523 	cancel_pending(ubi);
1524 	tree_destroy(&ubi->used);
1525 	tree_destroy(&ubi->free);
1526 	tree_destroy(&ubi->scrub);
1527 	kfree(ubi->lookuptbl);
1528 	return err;
1529 }
1530 
1531 /**
1532  * protection_queue_destroy - destroy the protection queue.
1533  * @ubi: UBI device description object
1534  */
1535 static void protection_queue_destroy(struct ubi_device *ubi)
1536 {
1537 	int i;
1538 	struct ubi_wl_entry *e, *tmp;
1539 
1540 	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1541 		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1542 			list_del(&e->u.list);
1543 			kmem_cache_free(ubi_wl_entry_slab, e);
1544 		}
1545 	}
1546 }
1547 
1548 /**
1549  * ubi_wl_close - close the wear-leveling sub-system.
1550  * @ubi: UBI device description object
1551  */
1552 void ubi_wl_close(struct ubi_device *ubi)
1553 {
1554 	dbg_wl("close the WL sub-system");
1555 	cancel_pending(ubi);
1556 	protection_queue_destroy(ubi);
1557 	tree_destroy(&ubi->used);
1558 	tree_destroy(&ubi->erroneous);
1559 	tree_destroy(&ubi->free);
1560 	tree_destroy(&ubi->scrub);
1561 	kfree(ubi->lookuptbl);
1562 }
1563 
1564 #ifdef CONFIG_MTD_UBI_DEBUG
1565 
1566 /**
1567  * paranoid_check_ec - make sure that the erase counter of a PEB is correct.
1568  * @ubi: UBI device description object
1569  * @pnum: the physical eraseblock number to check
1570  * @ec: the erase counter to check
1571  *
 1572  * This function returns zero if the erase counter of physical eraseblock @pnum
 1573  * matches @ec, %1 if it does not, and a negative error code in case of failure.
1574  */
1575 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
1576 {
1577 	int err;
1578 	long long read_ec;
1579 	struct ubi_ec_hdr *ec_hdr;
1580 
1581 	if (!(ubi_chk_flags & UBI_CHK_GEN))
1582 		return 0;
1583 
1584 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1585 	if (!ec_hdr)
1586 		return -ENOMEM;
1587 
1588 	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1589 	if (err && err != UBI_IO_BITFLIPS) {
1590 		/* The header does not have to exist */
1591 		err = 0;
1592 		goto out_free;
1593 	}
1594 
1595 	read_ec = be64_to_cpu(ec_hdr->ec);
1596 	if (ec != read_ec) {
1597 		ubi_err("paranoid check failed for PEB %d", pnum);
1598 		ubi_err("read EC is %lld, should be %d", read_ec, ec);
1599 		ubi_dbg_dump_stack();
1600 		err = 1;
1601 	} else
1602 		err = 0;
1603 
1604 out_free:
1605 	kfree(ec_hdr);
1606 	return err;
1607 }
1608 
1609 /**
1610  * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
1611  * @e: the wear-leveling entry to check
1612  * @root: the root of the tree
1613  *
1614  * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
1615  * is not.
1616  */
1617 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
1618 				     struct rb_root *root)
1619 {
1620 	if (!(ubi_chk_flags & UBI_CHK_GEN))
1621 		return 0;
1622 
1623 	if (in_wl_tree(e, root))
1624 		return 0;
1625 
1626 	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
1627 		e->pnum, e->ec, root);
1628 	ubi_dbg_dump_stack();
1629 	return -EINVAL;
1630 }
1631 
1632 /**
1633  * paranoid_check_in_pq - check if wear-leveling entry is in the protection
1634  *                        queue.
1635  * @ubi: UBI device description object
1636  * @e: the wear-leveling entry to check
1637  *
1638  * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
1639  */
1640 static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
1641 {
1642 	struct ubi_wl_entry *p;
1643 	int i;
1644 
1645 	if (!(ubi_chk_flags & UBI_CHK_GEN))
1646 		return 0;
1647 
1648 	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
1649 		list_for_each_entry(p, &ubi->pq[i], u.list)
1650 			if (p == e)
1651 				return 0;
1652 
1653 	ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
1654 		e->pnum, e->ec);
1655 	ubi_dbg_dump_stack();
1656 	return -EINVAL;
1657 }
1658 
1659 #endif /* CONFIG_MTD_UBI_DEBUG */
1660