xref: /openbmc/u-boot/drivers/mtd/ubi/wl.c (revision ff94bc40)
1c91a719dSKyungmin Park /*
2c91a719dSKyungmin Park  * Copyright (c) International Business Machines Corp., 2006
3c91a719dSKyungmin Park  *
41a459660SWolfgang Denk  * SPDX-License-Identifier:	GPL-2.0+
5c91a719dSKyungmin Park  *
6c91a719dSKyungmin Park  * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
7c91a719dSKyungmin Park  */
8c91a719dSKyungmin Park 
9c91a719dSKyungmin Park /*
10*ff94bc40SHeiko Schocher  * UBI wear-leveling sub-system.
11c91a719dSKyungmin Park  *
12*ff94bc40SHeiko Schocher  * This sub-system is responsible for wear-leveling. It works in terms of
13*ff94bc40SHeiko Schocher  * physical eraseblocks and erase counters and knows nothing about logical
14*ff94bc40SHeiko Schocher  * eraseblocks, volumes, etc. From this sub-system's perspective all physical
15*ff94bc40SHeiko Schocher  * eraseblocks are of two types - used and free. Used physical eraseblocks are
16*ff94bc40SHeiko Schocher  * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
17*ff94bc40SHeiko Schocher  * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
18c91a719dSKyungmin Park  *
19c91a719dSKyungmin Park  * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase counter
20*ff94bc40SHeiko Schocher  * header. The rest of the physical eraseblock contains only %0xFF bytes.
21c91a719dSKyungmin Park  *
22*ff94bc40SHeiko Schocher  * When physical eraseblocks are returned to the WL sub-system by means of the
23c91a719dSKyungmin Park  * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
24c91a719dSKyungmin Park  * done asynchronously in context of the per-UBI device background thread,
25*ff94bc40SHeiko Schocher  * which is also managed by the WL sub-system.
26c91a719dSKyungmin Park  *
27c91a719dSKyungmin Park  * The wear-leveling is ensured by means of moving the contents of used
28c91a719dSKyungmin Park  * physical eraseblocks with low erase counter to free physical eraseblocks
29c91a719dSKyungmin Park  * with high erase counter.
30c91a719dSKyungmin Park  *
31*ff94bc40SHeiko Schocher  * If the WL sub-system fails to erase a physical eraseblock, it marks it as
32*ff94bc40SHeiko Schocher  * bad.
33c91a719dSKyungmin Park  *
34*ff94bc40SHeiko Schocher  * This sub-system is also responsible for scrubbing. If a bit-flip is detected
35*ff94bc40SHeiko Schocher  * in a physical eraseblock, it has to be moved. Technically this is the same
36*ff94bc40SHeiko Schocher  * as moving it for wear-leveling reasons.
37c91a719dSKyungmin Park  *
38*ff94bc40SHeiko Schocher  * As it was said, for the UBI sub-system all physical eraseblocks are either
39*ff94bc40SHeiko Schocher  * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
40*ff94bc40SHeiko Schocher  * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
41*ff94bc40SHeiko Schocher  * RB-trees, as well as (temporarily) in the @wl->pq queue.
42c91a719dSKyungmin Park  *
43*ff94bc40SHeiko Schocher  * When the WL sub-system returns a physical eraseblock, the physical
44*ff94bc40SHeiko Schocher  * eraseblock is protected from being moved for some "time". For this reason,
45*ff94bc40SHeiko Schocher  * the physical eraseblock is not directly moved from the @wl->free tree to the
46*ff94bc40SHeiko Schocher  * @wl->used tree. There is a protection queue in between where this
47*ff94bc40SHeiko Schocher  * physical eraseblock is temporarily stored (@wl->pq).
48*ff94bc40SHeiko Schocher  *
49*ff94bc40SHeiko Schocher  * All this protection stuff is needed because:
50*ff94bc40SHeiko Schocher  *  o we don't want to move physical eraseblocks just after we have given them
51*ff94bc40SHeiko Schocher  *    to the user; instead, we first want to let users fill them up with data;
52*ff94bc40SHeiko Schocher  *
53*ff94bc40SHeiko Schocher  *  o there is a chance that the user will put the physical eraseblock very
54*ff94bc40SHeiko Schocher  *    soon, so it makes sense not to move it for some time, but wait.
55*ff94bc40SHeiko Schocher  *
56*ff94bc40SHeiko Schocher  * Physical eraseblocks stay protected only for limited time. But the "time" is
57*ff94bc40SHeiko Schocher  * measured in erase cycles in this case. This is implemented with help of the
58*ff94bc40SHeiko Schocher  * protection queue. Eraseblocks are put to the tail of this queue when they
59*ff94bc40SHeiko Schocher  * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
60*ff94bc40SHeiko Schocher  * head of the queue on each erase operation (for any eraseblock). So the
61*ff94bc40SHeiko Schocher  * length of the queue defines how many (global) erase cycles PEBs are protected.
62*ff94bc40SHeiko Schocher  *
63*ff94bc40SHeiko Schocher  * To put it differently, each physical eraseblock has 2 main states: free and
64*ff94bc40SHeiko Schocher  * used. The former state corresponds to the @wl->free tree. The latter state
65*ff94bc40SHeiko Schocher  * is split up into several sub-states:
66*ff94bc40SHeiko Schocher  * o the WL movement is allowed (@wl->used tree);
67*ff94bc40SHeiko Schocher  * o the WL movement is disallowed (@wl->erroneous) because the PEB is
68*ff94bc40SHeiko Schocher  *   erroneous - e.g., there was a read error;
69*ff94bc40SHeiko Schocher  * o the WL movement is temporarily prohibited (@wl->pq queue);
70*ff94bc40SHeiko Schocher  * o scrubbing is needed (@wl->scrub tree).
71*ff94bc40SHeiko Schocher  *
72*ff94bc40SHeiko Schocher  * Depending on the sub-state, wear-leveling entries of the used physical
73*ff94bc40SHeiko Schocher  * eraseblocks may be kept in one of those structures.
74c91a719dSKyungmin Park  *
75c91a719dSKyungmin Park  * Note, in this implementation, we keep a small in-RAM object for each physical
76c91a719dSKyungmin Park  * eraseblock. This is surely not a scalable solution. But it appears to be good
77c91a719dSKyungmin Park  * enough for moderately large flashes and it is simple. In future, one may
78*ff94bc40SHeiko Schocher  * re-work this sub-system and make it more scalable.
79c91a719dSKyungmin Park  *
80*ff94bc40SHeiko Schocher  * At the moment this sub-system does not utilize the sequence number, which
81*ff94bc40SHeiko Schocher  * was introduced relatively recently. But it would be wise to do this because
82*ff94bc40SHeiko Schocher  * the sequence number of a logical eraseblock characterizes how old it is. For
83c91a719dSKyungmin Park  * example, when we move a PEB with low erase counter, and we need to pick the
84c91a719dSKyungmin Park  * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
85c91a719dSKyungmin Park  * pick a target PEB with an average EC if our PEB is not very "old". This is
86*ff94bc40SHeiko Schocher  * room for future re-work of the WL sub-system.
87c91a719dSKyungmin Park  */
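
/*
 * Informal sketch of the PEB life cycle described above (an illustration
 * derived from this description, not a normative diagram):
 *
 *              ubi_wl_get_peb()          protection over
 *   @wl->free -----------------> @wl->pq ---------------> @wl->used
 *        ^                                                 |       |
 *        |   erase done      ubi_wl_put_peb() / WL move    |       | bit-flip
 *        +--------------------------------------------------       v
 *                                                              @wl->scrub
 *
 * PEBs which cannot be read reliably go to @wl->erroneous instead, and PEBs
 * which cannot be erased are marked bad.
 */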
88c91a719dSKyungmin Park 
89*ff94bc40SHeiko Schocher #define __UBOOT__
90*ff94bc40SHeiko Schocher #ifndef __UBOOT__
91c91a719dSKyungmin Park #include <linux/slab.h>
92c91a719dSKyungmin Park #include <linux/crc32.h>
93c91a719dSKyungmin Park #include <linux/freezer.h>
94c91a719dSKyungmin Park #include <linux/kthread.h>
95*ff94bc40SHeiko Schocher #else
96*ff94bc40SHeiko Schocher #include <ubi_uboot.h>
97c91a719dSKyungmin Park #endif
98c91a719dSKyungmin Park 
99c91a719dSKyungmin Park #include "ubi.h"
100c91a719dSKyungmin Park 
101c91a719dSKyungmin Park /* Number of physical eraseblocks reserved for wear-leveling purposes */
102c91a719dSKyungmin Park #define WL_RESERVED_PEBS 1
103c91a719dSKyungmin Park 
104c91a719dSKyungmin Park /*
105c91a719dSKyungmin Park  * Maximum difference between two erase counters. If this threshold is
106*ff94bc40SHeiko Schocher  * exceeded, the WL sub-system starts moving data from used physical
107*ff94bc40SHeiko Schocher  * eraseblocks with low erase counter to free physical eraseblocks with high
108*ff94bc40SHeiko Schocher  * erase counter.
109c91a719dSKyungmin Park  */
110c91a719dSKyungmin Park #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
111c91a719dSKyungmin Park 
112c91a719dSKyungmin Park /*
113*ff94bc40SHeiko Schocher  * When a physical eraseblock is moved, the WL sub-system has to pick the target
114c91a719dSKyungmin Park  * physical eraseblock to move to. The simplest way would be just to pick the
115c91a719dSKyungmin Park  * one with the highest erase counter. But in certain workloads this could lead
116c91a719dSKyungmin Park  * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
117c91a719dSKyungmin Park  * situation when the picked physical eraseblock is constantly erased after the
118c91a719dSKyungmin Park  * data is written to it. So, we have a constant which limits the highest erase
119*ff94bc40SHeiko Schocher  * counter of the free physical eraseblock to pick. Namely, the WL sub-system
120*ff94bc40SHeiko Schocher  * does not pick eraseblocks with erase counter greater than the lowest erase
121c91a719dSKyungmin Park  * counter plus %WL_FREE_MAX_DIFF.
122c91a719dSKyungmin Park  */
123c91a719dSKyungmin Park #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
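/*
 * For example, assuming the common default of CONFIG_MTD_UBI_WL_THRESHOLD
 * being 4096, WL_FREE_MAX_DIFF is 8192: if the least worn free PEB has an
 * erase counter of 100, free PEBs with an erase counter of 8292 or higher
 * are not picked as wear-leveling targets.
 */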
124c91a719dSKyungmin Park 
125c91a719dSKyungmin Park /*
126c91a719dSKyungmin Park  * Maximum number of consecutive background thread failures which is enough to
127c91a719dSKyungmin Park  * switch to read-only mode.
128c91a719dSKyungmin Park  */
129c91a719dSKyungmin Park #define WL_MAX_FAILURES 32
130c91a719dSKyungmin Park 
131*ff94bc40SHeiko Schocher static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
132*ff94bc40SHeiko Schocher static int self_check_in_wl_tree(const struct ubi_device *ubi,
133*ff94bc40SHeiko Schocher 				 struct ubi_wl_entry *e, struct rb_root *root);
134*ff94bc40SHeiko Schocher static int self_check_in_pq(const struct ubi_device *ubi,
135*ff94bc40SHeiko Schocher 			    struct ubi_wl_entry *e);
136*ff94bc40SHeiko Schocher 
137*ff94bc40SHeiko Schocher #ifdef CONFIG_MTD_UBI_FASTMAP
138*ff94bc40SHeiko Schocher #ifndef __UBOOT__
139c91a719dSKyungmin Park /**
140*ff94bc40SHeiko Schocher  * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
141*ff94bc40SHeiko Schocher  * @wrk: the work description object
142c91a719dSKyungmin Park  */
143*ff94bc40SHeiko Schocher static void update_fastmap_work_fn(struct work_struct *wrk)
144*ff94bc40SHeiko Schocher {
145*ff94bc40SHeiko Schocher 	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
146*ff94bc40SHeiko Schocher 	ubi_update_fastmap(ubi);
147*ff94bc40SHeiko Schocher }
148*ff94bc40SHeiko Schocher #endif
149c91a719dSKyungmin Park 
150c91a719dSKyungmin Park /**
151*ff94bc40SHeiko Schocher  * ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
152*ff94bc40SHeiko Schocher  * @ubi: UBI device description object
153*ff94bc40SHeiko Schocher  * @pnum: the PEB to be checked
154c91a719dSKyungmin Park  */
155*ff94bc40SHeiko Schocher static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
156*ff94bc40SHeiko Schocher {
157*ff94bc40SHeiko Schocher 	int i;
158c91a719dSKyungmin Park 
159*ff94bc40SHeiko Schocher 	if (!ubi->fm)
160*ff94bc40SHeiko Schocher 		return 0;
161*ff94bc40SHeiko Schocher 
162*ff94bc40SHeiko Schocher 	for (i = 0; i < ubi->fm->used_blocks; i++)
163*ff94bc40SHeiko Schocher 		if (ubi->fm->e[i]->pnum == pnum)
164*ff94bc40SHeiko Schocher 			return 1;
165*ff94bc40SHeiko Schocher 
166*ff94bc40SHeiko Schocher 	return 0;
167*ff94bc40SHeiko Schocher }
168c91a719dSKyungmin Park #else
169*ff94bc40SHeiko Schocher static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
170*ff94bc40SHeiko Schocher {
171*ff94bc40SHeiko Schocher 	return 0;
172*ff94bc40SHeiko Schocher }
173c91a719dSKyungmin Park #endif
174c91a719dSKyungmin Park 
175c91a719dSKyungmin Park /**
176c91a719dSKyungmin Park  * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
177c91a719dSKyungmin Park  * @e: the wear-leveling entry to add
178c91a719dSKyungmin Park  * @root: the root of the tree
179c91a719dSKyungmin Park  *
180c91a719dSKyungmin Park  * Note, we use (erase counter, physical eraseblock number) pairs as keys in
181c91a719dSKyungmin Park  * the @ubi->used and @ubi->free RB-trees.
182c91a719dSKyungmin Park  */
183c91a719dSKyungmin Park static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
184c91a719dSKyungmin Park {
185c91a719dSKyungmin Park 	struct rb_node **p, *parent = NULL;
186c91a719dSKyungmin Park 
187c91a719dSKyungmin Park 	p = &root->rb_node;
188c91a719dSKyungmin Park 	while (*p) {
189c91a719dSKyungmin Park 		struct ubi_wl_entry *e1;
190c91a719dSKyungmin Park 
191c91a719dSKyungmin Park 		parent = *p;
192*ff94bc40SHeiko Schocher 		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
193c91a719dSKyungmin Park 
194c91a719dSKyungmin Park 		if (e->ec < e1->ec)
195c91a719dSKyungmin Park 			p = &(*p)->rb_left;
196c91a719dSKyungmin Park 		else if (e->ec > e1->ec)
197c91a719dSKyungmin Park 			p = &(*p)->rb_right;
198c91a719dSKyungmin Park 		else {
199c91a719dSKyungmin Park 			ubi_assert(e->pnum != e1->pnum);
200c91a719dSKyungmin Park 			if (e->pnum < e1->pnum)
201c91a719dSKyungmin Park 				p = &(*p)->rb_left;
202c91a719dSKyungmin Park 			else
203c91a719dSKyungmin Park 				p = &(*p)->rb_right;
204c91a719dSKyungmin Park 		}
205c91a719dSKyungmin Park 	}
206c91a719dSKyungmin Park 
207*ff94bc40SHeiko Schocher 	rb_link_node(&e->u.rb, parent, p);
208*ff94bc40SHeiko Schocher 	rb_insert_color(&e->u.rb, root);
209c91a719dSKyungmin Park }
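
/*
 * Hypothetical example of the ordering used above: entries with (EC, pnum)
 * pairs (10, 3), (10, 7) and (12, 1) are kept in the order
 * (10, 3) < (10, 7) < (12, 1) - the erase counter is the primary key and the
 * PEB number only breaks ties between equal erase counters.
 */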
210c91a719dSKyungmin Park 
211c91a719dSKyungmin Park /**
212c91a719dSKyungmin Park  * do_work - do one pending work.
213c91a719dSKyungmin Park  * @ubi: UBI device description object
214c91a719dSKyungmin Park  *
215c91a719dSKyungmin Park  * This function returns zero in case of success and a negative error code in
216c91a719dSKyungmin Park  * case of failure.
217c91a719dSKyungmin Park  */
218c91a719dSKyungmin Park static int do_work(struct ubi_device *ubi)
219c91a719dSKyungmin Park {
220c91a719dSKyungmin Park 	int err;
221c91a719dSKyungmin Park 	struct ubi_work *wrk;
222c91a719dSKyungmin Park 
223c91a719dSKyungmin Park 	cond_resched();
224c91a719dSKyungmin Park 
225c91a719dSKyungmin Park 	/*
226c91a719dSKyungmin Park 	 * @ubi->work_sem is used to synchronize with the workers. Workers take
227c91a719dSKyungmin Park 	 * it in read mode, so many of them may be doing works at a time. But
228c91a719dSKyungmin Park 	 * the queue flush code has to be sure the whole queue of works is
229c91a719dSKyungmin Park 	 * done, and it takes the mutex in write mode.
230c91a719dSKyungmin Park 	 */
231c91a719dSKyungmin Park 	down_read(&ubi->work_sem);
232c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
233c91a719dSKyungmin Park 	if (list_empty(&ubi->works)) {
234c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
235c91a719dSKyungmin Park 		up_read(&ubi->work_sem);
236c91a719dSKyungmin Park 		return 0;
237c91a719dSKyungmin Park 	}
238c91a719dSKyungmin Park 
239c91a719dSKyungmin Park 	wrk = list_entry(ubi->works.next, struct ubi_work, list);
240c91a719dSKyungmin Park 	list_del(&wrk->list);
241c91a719dSKyungmin Park 	ubi->works_count -= 1;
242c91a719dSKyungmin Park 	ubi_assert(ubi->works_count >= 0);
243c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
244c91a719dSKyungmin Park 
245c91a719dSKyungmin Park 	/*
246c91a719dSKyungmin Park 	 * Call the worker function. Do not touch the work structure
247c91a719dSKyungmin Park 	 * after this call as it will have been freed or reused by that
248c91a719dSKyungmin Park 	 * time by the worker function.
249c91a719dSKyungmin Park 	 */
250c91a719dSKyungmin Park 	err = wrk->func(ubi, wrk, 0);
251c91a719dSKyungmin Park 	if (err)
252c91a719dSKyungmin Park 		ubi_err("work failed with error code %d", err);
253c91a719dSKyungmin Park 	up_read(&ubi->work_sem);
254c91a719dSKyungmin Park 
255c91a719dSKyungmin Park 	return err;
256c91a719dSKyungmin Park }
257c91a719dSKyungmin Park 
258c91a719dSKyungmin Park /**
259c91a719dSKyungmin Park  * produce_free_peb - produce a free physical eraseblock.
260c91a719dSKyungmin Park  * @ubi: UBI device description object
261c91a719dSKyungmin Park  *
262c91a719dSKyungmin Park  * This function tries to make a free PEB by means of synchronous execution of
263c91a719dSKyungmin Park  * pending works. This may be needed if, for example, the background thread is
264c91a719dSKyungmin Park  * disabled. Returns zero in case of success and a negative error code in case
265c91a719dSKyungmin Park  * of failure.
266c91a719dSKyungmin Park  */
267c91a719dSKyungmin Park static int produce_free_peb(struct ubi_device *ubi)
268c91a719dSKyungmin Park {
269c91a719dSKyungmin Park 	int err;
270c91a719dSKyungmin Park 
271c91a719dSKyungmin Park 	while (!ubi->free.rb_node) {
272c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
273c91a719dSKyungmin Park 
274c91a719dSKyungmin Park 		dbg_wl("do one work synchronously");
275c91a719dSKyungmin Park 		err = do_work(ubi);
276c91a719dSKyungmin Park 
277c91a719dSKyungmin Park 		spin_lock(&ubi->wl_lock);
278*ff94bc40SHeiko Schocher 		if (err)
279*ff94bc40SHeiko Schocher 			return err;
280c91a719dSKyungmin Park 	}
281c91a719dSKyungmin Park 
282c91a719dSKyungmin Park 	return 0;
283c91a719dSKyungmin Park }
284c91a719dSKyungmin Park 
285c91a719dSKyungmin Park /**
286c91a719dSKyungmin Park  * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
287c91a719dSKyungmin Park  * @e: the wear-leveling entry to check
288c91a719dSKyungmin Park  * @root: the root of the tree
289c91a719dSKyungmin Park  *
290c91a719dSKyungmin Park  * This function returns non-zero if @e is in the @root RB-tree and zero if it
291c91a719dSKyungmin Park  * is not.
292c91a719dSKyungmin Park  */
293c91a719dSKyungmin Park static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
294c91a719dSKyungmin Park {
295c91a719dSKyungmin Park 	struct rb_node *p;
296c91a719dSKyungmin Park 
297c91a719dSKyungmin Park 	p = root->rb_node;
298c91a719dSKyungmin Park 	while (p) {
299c91a719dSKyungmin Park 		struct ubi_wl_entry *e1;
300c91a719dSKyungmin Park 
301*ff94bc40SHeiko Schocher 		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
302c91a719dSKyungmin Park 
303c91a719dSKyungmin Park 		if (e->pnum == e1->pnum) {
304c91a719dSKyungmin Park 			ubi_assert(e == e1);
305c91a719dSKyungmin Park 			return 1;
306c91a719dSKyungmin Park 		}
307c91a719dSKyungmin Park 
308c91a719dSKyungmin Park 		if (e->ec < e1->ec)
309c91a719dSKyungmin Park 			p = p->rb_left;
310c91a719dSKyungmin Park 		else if (e->ec > e1->ec)
311c91a719dSKyungmin Park 			p = p->rb_right;
312c91a719dSKyungmin Park 		else {
313c91a719dSKyungmin Park 			ubi_assert(e->pnum != e1->pnum);
314c91a719dSKyungmin Park 			if (e->pnum < e1->pnum)
315c91a719dSKyungmin Park 				p = p->rb_left;
316c91a719dSKyungmin Park 			else
317c91a719dSKyungmin Park 				p = p->rb_right;
318c91a719dSKyungmin Park 		}
319c91a719dSKyungmin Park 	}
320c91a719dSKyungmin Park 
321c91a719dSKyungmin Park 	return 0;
322c91a719dSKyungmin Park }
323c91a719dSKyungmin Park 
324c91a719dSKyungmin Park /**
325*ff94bc40SHeiko Schocher  * prot_queue_add - add physical eraseblock to the protection queue.
326c91a719dSKyungmin Park  * @ubi: UBI device description object
327c91a719dSKyungmin Park  * @e: the physical eraseblock to add
328c91a719dSKyungmin Park  *
329*ff94bc40SHeiko Schocher  * This function adds @e to the tail of the protection queue @ubi->pq, where
330*ff94bc40SHeiko Schocher  * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
331*ff94bc40SHeiko Schocher  * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock has to
332*ff94bc40SHeiko Schocher  * be locked.
333c91a719dSKyungmin Park  */
334*ff94bc40SHeiko Schocher static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
335c91a719dSKyungmin Park {
336*ff94bc40SHeiko Schocher 	int pq_tail = ubi->pq_head - 1;
337c91a719dSKyungmin Park 
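	/*
	 * The protection queue is circular: @pq_head is the slot served on
	 * the next erase operation, so the slot just behind it (wrapping
	 * around at UBI_PROT_QUEUE_LEN) is the tail where a new entry gets
	 * the longest possible protection.
	 */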
338*ff94bc40SHeiko Schocher 	if (pq_tail < 0)
339*ff94bc40SHeiko Schocher 		pq_tail = UBI_PROT_QUEUE_LEN - 1;
340*ff94bc40SHeiko Schocher 	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
341*ff94bc40SHeiko Schocher 	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
342*ff94bc40SHeiko Schocher 	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
343c91a719dSKyungmin Park }
344c91a719dSKyungmin Park 
345c91a719dSKyungmin Park /**
346c91a719dSKyungmin Park  * find_wl_entry - find wear-leveling entry closest to certain erase counter.
347*ff94bc40SHeiko Schocher  * @ubi: UBI device description object
348c91a719dSKyungmin Park  * @root: the RB-tree where to look for
349*ff94bc40SHeiko Schocher  * @diff: maximum possible difference from the smallest erase counter
350c91a719dSKyungmin Park  *
351c91a719dSKyungmin Park  * This function looks for a wear leveling entry with erase counter closest to
352*ff94bc40SHeiko Schocher  * min + @diff, where min is the smallest erase counter.
353c91a719dSKyungmin Park  */
354*ff94bc40SHeiko Schocher static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
355*ff94bc40SHeiko Schocher 					  struct rb_root *root, int diff)
356c91a719dSKyungmin Park {
357c91a719dSKyungmin Park 	struct rb_node *p;
358*ff94bc40SHeiko Schocher 	struct ubi_wl_entry *e, *prev_e = NULL;
359*ff94bc40SHeiko Schocher 	int max;
360c91a719dSKyungmin Park 
361*ff94bc40SHeiko Schocher 	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
362*ff94bc40SHeiko Schocher 	max = e->ec + diff;
363c91a719dSKyungmin Park 
364c91a719dSKyungmin Park 	p = root->rb_node;
365c91a719dSKyungmin Park 	while (p) {
366c91a719dSKyungmin Park 		struct ubi_wl_entry *e1;
367c91a719dSKyungmin Park 
368*ff94bc40SHeiko Schocher 		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
369c91a719dSKyungmin Park 		if (e1->ec >= max)
370c91a719dSKyungmin Park 			p = p->rb_left;
371c91a719dSKyungmin Park 		else {
372c91a719dSKyungmin Park 			p = p->rb_right;
373*ff94bc40SHeiko Schocher 			prev_e = e;
374c91a719dSKyungmin Park 			e = e1;
375c91a719dSKyungmin Park 		}
376c91a719dSKyungmin Park 	}
377c91a719dSKyungmin Park 
378*ff94bc40SHeiko Schocher 	/* If no fastmap has been written and this WL entry can be used
379*ff94bc40SHeiko Schocher 	 * as anchor PEB, hold it back and return the second best WL entry
380*ff94bc40SHeiko Schocher 	 * such that fastmap can use the anchor PEB later. */
381*ff94bc40SHeiko Schocher 	if (prev_e && !ubi->fm_disabled &&
382*ff94bc40SHeiko Schocher 	    !ubi->fm && e->pnum < UBI_FM_MAX_START)
383*ff94bc40SHeiko Schocher 		return prev_e;
384*ff94bc40SHeiko Schocher 
385c91a719dSKyungmin Park 	return e;
386c91a719dSKyungmin Park }
387c91a719dSKyungmin Park 
388c91a719dSKyungmin Park /**
389*ff94bc40SHeiko Schocher  * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
390c91a719dSKyungmin Park  * @ubi: UBI device description object
391*ff94bc40SHeiko Schocher  * @root: the RB-tree where to look for
392c91a719dSKyungmin Park  *
393*ff94bc40SHeiko Schocher  * This function looks for a wear leveling entry with medium erase counter,
394*ff94bc40SHeiko Schocher  * but not greater than or equal to the lowest erase counter plus
395*ff94bc40SHeiko Schocher  * %WL_FREE_MAX_DIFF/2.
396c91a719dSKyungmin Park  */
397*ff94bc40SHeiko Schocher static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
398*ff94bc40SHeiko Schocher 					       struct rb_root *root)
399c91a719dSKyungmin Park {
400c91a719dSKyungmin Park 	struct ubi_wl_entry *e, *first, *last;
401c91a719dSKyungmin Park 
402*ff94bc40SHeiko Schocher 	first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
403*ff94bc40SHeiko Schocher 	last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
404c91a719dSKyungmin Park 
405*ff94bc40SHeiko Schocher 	if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
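		/*
		 * All erase counters are close together, so the root of the
		 * RB-tree is a cheap and good enough approximation of an
		 * entry with a "medium" erase counter.
		 */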
406*ff94bc40SHeiko Schocher 		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
407c91a719dSKyungmin Park 
408*ff94bc40SHeiko Schocher #ifdef CONFIG_MTD_UBI_FASTMAP
409*ff94bc40SHeiko Schocher 		/* If no fastmap has been written and this WL entry can be used
410*ff94bc40SHeiko Schocher 		 * as anchor PEB, hold it back and return the second best
411*ff94bc40SHeiko Schocher 		 * WL entry such that fastmap can use the anchor PEB later. */
412*ff94bc40SHeiko Schocher 		if (e && !ubi->fm_disabled && !ubi->fm &&
413*ff94bc40SHeiko Schocher 		    e->pnum < UBI_FM_MAX_START)
414*ff94bc40SHeiko Schocher 			e = rb_entry(rb_next(root->rb_node),
415*ff94bc40SHeiko Schocher 				     struct ubi_wl_entry, u.rb);
416*ff94bc40SHeiko Schocher #endif
417*ff94bc40SHeiko Schocher 	} else
418*ff94bc40SHeiko Schocher 		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
419c91a719dSKyungmin Park 
420*ff94bc40SHeiko Schocher 	return e;
421c91a719dSKyungmin Park }
422c91a719dSKyungmin Park 
423*ff94bc40SHeiko Schocher #ifdef CONFIG_MTD_UBI_FASTMAP
424*ff94bc40SHeiko Schocher /**
425*ff94bc40SHeiko Schocher  * find_anchor_wl_entry - find a wear-leveling entry to be used as the anchor PEB.
426*ff94bc40SHeiko Schocher  * @root: the RB-tree where to look for
427c91a719dSKyungmin Park  */
428*ff94bc40SHeiko Schocher static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
429*ff94bc40SHeiko Schocher {
430*ff94bc40SHeiko Schocher 	struct rb_node *p;
431*ff94bc40SHeiko Schocher 	struct ubi_wl_entry *e, *victim = NULL;
432*ff94bc40SHeiko Schocher 	int max_ec = UBI_MAX_ERASECOUNTER;
433c91a719dSKyungmin Park 
434*ff94bc40SHeiko Schocher 	ubi_rb_for_each_entry(p, e, root, u.rb) {
435*ff94bc40SHeiko Schocher 		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
436*ff94bc40SHeiko Schocher 			victim = e;
437*ff94bc40SHeiko Schocher 			max_ec = e->ec;
438c91a719dSKyungmin Park 		}
439c91a719dSKyungmin Park 	}
440c91a719dSKyungmin Park 
441*ff94bc40SHeiko Schocher 	return victim;
442*ff94bc40SHeiko Schocher }
443c91a719dSKyungmin Park 
444*ff94bc40SHeiko Schocher static int anchor_pebs_avalible(struct rb_root *root)
445*ff94bc40SHeiko Schocher {
446*ff94bc40SHeiko Schocher 	struct rb_node *p;
447*ff94bc40SHeiko Schocher 	struct ubi_wl_entry *e;
448c91a719dSKyungmin Park 
449*ff94bc40SHeiko Schocher 	ubi_rb_for_each_entry(p, e, root, u.rb)
450*ff94bc40SHeiko Schocher 		if (e->pnum < UBI_FM_MAX_START)
451*ff94bc40SHeiko Schocher 			return 1;
452*ff94bc40SHeiko Schocher 
453*ff94bc40SHeiko Schocher 	return 0;
454c91a719dSKyungmin Park }
455c91a719dSKyungmin Park 
456c91a719dSKyungmin Park /**
457*ff94bc40SHeiko Schocher  * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
458*ff94bc40SHeiko Schocher  * @ubi: UBI device description object
459*ff94bc40SHeiko Schocher  * @anchor: This PEB will be used as anchor PEB by fastmap
460*ff94bc40SHeiko Schocher  *
461*ff94bc40SHeiko Schocher  * The function returns a physical erase block with a given maximal number
462*ff94bc40SHeiko Schocher  * and removes it from the wl subsystem.
463*ff94bc40SHeiko Schocher  * Must be called with wl_lock held!
464*ff94bc40SHeiko Schocher  */
465*ff94bc40SHeiko Schocher struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
466*ff94bc40SHeiko Schocher {
467*ff94bc40SHeiko Schocher 	struct ubi_wl_entry *e = NULL;
468*ff94bc40SHeiko Schocher 
469*ff94bc40SHeiko Schocher 	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
470*ff94bc40SHeiko Schocher 		goto out;
471*ff94bc40SHeiko Schocher 
472*ff94bc40SHeiko Schocher 	if (anchor)
473*ff94bc40SHeiko Schocher 		e = find_anchor_wl_entry(&ubi->free);
474*ff94bc40SHeiko Schocher 	else
475*ff94bc40SHeiko Schocher 		e = find_mean_wl_entry(ubi, &ubi->free);
476*ff94bc40SHeiko Schocher 
477*ff94bc40SHeiko Schocher 	if (!e)
478*ff94bc40SHeiko Schocher 		goto out;
479*ff94bc40SHeiko Schocher 
480*ff94bc40SHeiko Schocher 	self_check_in_wl_tree(ubi, e, &ubi->free);
481*ff94bc40SHeiko Schocher 
482*ff94bc40SHeiko Schocher 	/* remove it from the free list,
483*ff94bc40SHeiko Schocher 	 * the wl subsystem no longer knows this erase block */
484*ff94bc40SHeiko Schocher 	rb_erase(&e->u.rb, &ubi->free);
485*ff94bc40SHeiko Schocher 	ubi->free_count--;
486*ff94bc40SHeiko Schocher out:
487*ff94bc40SHeiko Schocher 	return e;
488*ff94bc40SHeiko Schocher }
489*ff94bc40SHeiko Schocher #endif
490*ff94bc40SHeiko Schocher 
491*ff94bc40SHeiko Schocher /**
492*ff94bc40SHeiko Schocher  * __wl_get_peb - get a physical eraseblock.
493*ff94bc40SHeiko Schocher  * @ubi: UBI device description object
494*ff94bc40SHeiko Schocher  *
495*ff94bc40SHeiko Schocher  * This function returns a physical eraseblock in case of success and a
496*ff94bc40SHeiko Schocher  * negative error code in case of failure.
497*ff94bc40SHeiko Schocher  */
498*ff94bc40SHeiko Schocher static int __wl_get_peb(struct ubi_device *ubi)
499*ff94bc40SHeiko Schocher {
500*ff94bc40SHeiko Schocher 	int err;
501*ff94bc40SHeiko Schocher 	struct ubi_wl_entry *e;
502*ff94bc40SHeiko Schocher 
503*ff94bc40SHeiko Schocher retry:
504*ff94bc40SHeiko Schocher 	if (!ubi->free.rb_node) {
505*ff94bc40SHeiko Schocher 		if (ubi->works_count == 0) {
506*ff94bc40SHeiko Schocher 			ubi_err("no free eraseblocks");
507*ff94bc40SHeiko Schocher 			ubi_assert(list_empty(&ubi->works));
508*ff94bc40SHeiko Schocher 			return -ENOSPC;
509*ff94bc40SHeiko Schocher 		}
510*ff94bc40SHeiko Schocher 
511*ff94bc40SHeiko Schocher 		err = produce_free_peb(ubi);
512*ff94bc40SHeiko Schocher 		if (err < 0)
513*ff94bc40SHeiko Schocher 			return err;
514*ff94bc40SHeiko Schocher 		goto retry;
515*ff94bc40SHeiko Schocher 	}
516*ff94bc40SHeiko Schocher 
517*ff94bc40SHeiko Schocher 	e = find_mean_wl_entry(ubi, &ubi->free);
518*ff94bc40SHeiko Schocher 	if (!e) {
519*ff94bc40SHeiko Schocher 		ubi_err("no free eraseblocks");
520*ff94bc40SHeiko Schocher 		return -ENOSPC;
521*ff94bc40SHeiko Schocher 	}
522*ff94bc40SHeiko Schocher 
523*ff94bc40SHeiko Schocher 	self_check_in_wl_tree(ubi, e, &ubi->free);
524*ff94bc40SHeiko Schocher 
525*ff94bc40SHeiko Schocher 	/*
526*ff94bc40SHeiko Schocher 	 * Move the physical eraseblock to the protection queue where it will
527*ff94bc40SHeiko Schocher 	 * be protected from being moved for some time.
528*ff94bc40SHeiko Schocher 	 */
529*ff94bc40SHeiko Schocher 	rb_erase(&e->u.rb, &ubi->free);
530*ff94bc40SHeiko Schocher 	ubi->free_count--;
531*ff94bc40SHeiko Schocher 	dbg_wl("PEB %d EC %d", e->pnum, e->ec);
532*ff94bc40SHeiko Schocher #ifndef CONFIG_MTD_UBI_FASTMAP
533*ff94bc40SHeiko Schocher 	/* We have to enqueue e only if fastmap is disabled;
534*ff94bc40SHeiko Schocher 	 * if fastmap is enabled, prot_queue_add() will be called by
535*ff94bc40SHeiko Schocher 	 * ubi_wl_get_peb() after removing e from the pool. */
536*ff94bc40SHeiko Schocher 	prot_queue_add(ubi, e);
537*ff94bc40SHeiko Schocher #endif
538*ff94bc40SHeiko Schocher 	return e->pnum;
539*ff94bc40SHeiko Schocher }
540*ff94bc40SHeiko Schocher 
541*ff94bc40SHeiko Schocher #ifdef CONFIG_MTD_UBI_FASTMAP
542*ff94bc40SHeiko Schocher /**
543*ff94bc40SHeiko Schocher  * return_unused_pool_pebs - returns unused PEBs to the free tree.
544*ff94bc40SHeiko Schocher  * @ubi: UBI device description object
545*ff94bc40SHeiko Schocher  * @pool: fastmap pool description object
546*ff94bc40SHeiko Schocher  */
547*ff94bc40SHeiko Schocher static void return_unused_pool_pebs(struct ubi_device *ubi,
548*ff94bc40SHeiko Schocher 				    struct ubi_fm_pool *pool)
549*ff94bc40SHeiko Schocher {
550*ff94bc40SHeiko Schocher 	int i;
551*ff94bc40SHeiko Schocher 	struct ubi_wl_entry *e;
552*ff94bc40SHeiko Schocher 
553*ff94bc40SHeiko Schocher 	for (i = pool->used; i < pool->size; i++) {
554*ff94bc40SHeiko Schocher 		e = ubi->lookuptbl[pool->pebs[i]];
555*ff94bc40SHeiko Schocher 		wl_tree_add(e, &ubi->free);
556*ff94bc40SHeiko Schocher 		ubi->free_count++;
557*ff94bc40SHeiko Schocher 	}
558*ff94bc40SHeiko Schocher }
559*ff94bc40SHeiko Schocher 
560*ff94bc40SHeiko Schocher /**
561*ff94bc40SHeiko Schocher  * refill_wl_pool - refills the fastmap pool used by the
562*ff94bc40SHeiko Schocher  * WL sub-system.
563*ff94bc40SHeiko Schocher  * @ubi: UBI device description object
564*ff94bc40SHeiko Schocher  */
565*ff94bc40SHeiko Schocher static void refill_wl_pool(struct ubi_device *ubi)
566*ff94bc40SHeiko Schocher {
567*ff94bc40SHeiko Schocher 	struct ubi_wl_entry *e;
568*ff94bc40SHeiko Schocher 	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
569*ff94bc40SHeiko Schocher 
570*ff94bc40SHeiko Schocher 	return_unused_pool_pebs(ubi, pool);
571*ff94bc40SHeiko Schocher 
572*ff94bc40SHeiko Schocher 	for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
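		/*
		 * Stop refilling once the free tree is empty or only a
		 * handful of free PEBs (beyond the bad-PEB reserve) would be
		 * left - presumably so that erase and wear-leveling work can
		 * still make progress outside the pool.
		 */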
573*ff94bc40SHeiko Schocher 		if (!ubi->free.rb_node ||
574*ff94bc40SHeiko Schocher 		   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
575*ff94bc40SHeiko Schocher 			break;
576*ff94bc40SHeiko Schocher 
577*ff94bc40SHeiko Schocher 		e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
578*ff94bc40SHeiko Schocher 		self_check_in_wl_tree(ubi, e, &ubi->free);
579*ff94bc40SHeiko Schocher 		rb_erase(&e->u.rb, &ubi->free);
580*ff94bc40SHeiko Schocher 		ubi->free_count--;
581*ff94bc40SHeiko Schocher 
582*ff94bc40SHeiko Schocher 		pool->pebs[pool->size] = e->pnum;
583*ff94bc40SHeiko Schocher 	}
584*ff94bc40SHeiko Schocher 	pool->used = 0;
585*ff94bc40SHeiko Schocher }
586*ff94bc40SHeiko Schocher 
587*ff94bc40SHeiko Schocher /**
588*ff94bc40SHeiko Schocher  * refill_wl_user_pool - refills the fastmap pool used by ubi_wl_get_peb.
589*ff94bc40SHeiko Schocher  * @ubi: UBI device description object
590*ff94bc40SHeiko Schocher  */
591*ff94bc40SHeiko Schocher static void refill_wl_user_pool(struct ubi_device *ubi)
592*ff94bc40SHeiko Schocher {
593*ff94bc40SHeiko Schocher 	struct ubi_fm_pool *pool = &ubi->fm_pool;
594*ff94bc40SHeiko Schocher 
595*ff94bc40SHeiko Schocher 	return_unused_pool_pebs(ubi, pool);
596*ff94bc40SHeiko Schocher 
597*ff94bc40SHeiko Schocher 	for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
598*ff94bc40SHeiko Schocher 		pool->pebs[pool->size] = __wl_get_peb(ubi);
599*ff94bc40SHeiko Schocher 		if (pool->pebs[pool->size] < 0)
600*ff94bc40SHeiko Schocher 			break;
601*ff94bc40SHeiko Schocher 	}
602*ff94bc40SHeiko Schocher 	pool->used = 0;
603*ff94bc40SHeiko Schocher }
604*ff94bc40SHeiko Schocher 
605*ff94bc40SHeiko Schocher /**
606*ff94bc40SHeiko Schocher  * ubi_refill_pools - refills all fastmap PEB pools.
607*ff94bc40SHeiko Schocher  * @ubi: UBI device description object
608*ff94bc40SHeiko Schocher  */
609*ff94bc40SHeiko Schocher void ubi_refill_pools(struct ubi_device *ubi)
610*ff94bc40SHeiko Schocher {
611*ff94bc40SHeiko Schocher 	spin_lock(&ubi->wl_lock);
612*ff94bc40SHeiko Schocher 	refill_wl_pool(ubi);
613*ff94bc40SHeiko Schocher 	refill_wl_user_pool(ubi);
614*ff94bc40SHeiko Schocher 	spin_unlock(&ubi->wl_lock);
615*ff94bc40SHeiko Schocher }
616*ff94bc40SHeiko Schocher 
617*ff94bc40SHeiko Schocher /* ubi_wl_get_peb - works exactly like __wl_get_peb but keeps track of
618*ff94bc40SHeiko Schocher  * the fastmap pool.
619*ff94bc40SHeiko Schocher  */
620*ff94bc40SHeiko Schocher int ubi_wl_get_peb(struct ubi_device *ubi)
621*ff94bc40SHeiko Schocher {
622*ff94bc40SHeiko Schocher 	int ret;
623*ff94bc40SHeiko Schocher 	struct ubi_fm_pool *pool = &ubi->fm_pool;
624*ff94bc40SHeiko Schocher 	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
625*ff94bc40SHeiko Schocher 
626*ff94bc40SHeiko Schocher 	if (!pool->size || !wl_pool->size || pool->used == pool->size ||
627*ff94bc40SHeiko Schocher 	    wl_pool->used == wl_pool->size)
628*ff94bc40SHeiko Schocher 		ubi_update_fastmap(ubi);
629*ff94bc40SHeiko Schocher 
630*ff94bc40SHeiko Schocher 	/* we did not get a single free PEB */
631*ff94bc40SHeiko Schocher 	if (!pool->size)
632*ff94bc40SHeiko Schocher 		ret = -ENOSPC;
633*ff94bc40SHeiko Schocher 	else {
634*ff94bc40SHeiko Schocher 		spin_lock(&ubi->wl_lock);
635*ff94bc40SHeiko Schocher 		ret = pool->pebs[pool->used++];
636*ff94bc40SHeiko Schocher 		prot_queue_add(ubi, ubi->lookuptbl[ret]);
637*ff94bc40SHeiko Schocher 		spin_unlock(&ubi->wl_lock);
638*ff94bc40SHeiko Schocher 	}
639*ff94bc40SHeiko Schocher 
640*ff94bc40SHeiko Schocher 	return ret;
641*ff94bc40SHeiko Schocher }
642*ff94bc40SHeiko Schocher 
643*ff94bc40SHeiko Schocher /* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
644*ff94bc40SHeiko Schocher  *
645*ff94bc40SHeiko Schocher  * @ubi: UBI device description object
646*ff94bc40SHeiko Schocher  */
647*ff94bc40SHeiko Schocher static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
648*ff94bc40SHeiko Schocher {
649*ff94bc40SHeiko Schocher 	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
650*ff94bc40SHeiko Schocher 	int pnum;
651*ff94bc40SHeiko Schocher 
652*ff94bc40SHeiko Schocher 	if (pool->used == pool->size || !pool->size) {
653*ff94bc40SHeiko Schocher 		/* We cannot update the fastmap here because this
654*ff94bc40SHeiko Schocher 		 * function is called in atomic context.
655*ff94bc40SHeiko Schocher 		 * Let's fail here and refill/update it as soon as possible. */
656*ff94bc40SHeiko Schocher #ifndef __UBOOT__
657*ff94bc40SHeiko Schocher 		schedule_work(&ubi->fm_work);
658*ff94bc40SHeiko Schocher #else
659*ff94bc40SHeiko Schocher 		/* In U-Boot we must call this directly */
660*ff94bc40SHeiko Schocher 		ubi_update_fastmap(ubi);
661*ff94bc40SHeiko Schocher #endif
662*ff94bc40SHeiko Schocher 		return NULL;
663*ff94bc40SHeiko Schocher 	} else {
664*ff94bc40SHeiko Schocher 		pnum = pool->pebs[pool->used++];
665*ff94bc40SHeiko Schocher 		return ubi->lookuptbl[pnum];
666*ff94bc40SHeiko Schocher 	}
667*ff94bc40SHeiko Schocher }
668*ff94bc40SHeiko Schocher #else
669*ff94bc40SHeiko Schocher static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
670*ff94bc40SHeiko Schocher {
671*ff94bc40SHeiko Schocher 	struct ubi_wl_entry *e;
672*ff94bc40SHeiko Schocher 
673*ff94bc40SHeiko Schocher 	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
674*ff94bc40SHeiko Schocher 	self_check_in_wl_tree(ubi, e, &ubi->free);
675*ff94bc40SHeiko Schocher 	rb_erase(&e->u.rb, &ubi->free);
676*ff94bc40SHeiko Schocher 
677*ff94bc40SHeiko Schocher 	return e;
678*ff94bc40SHeiko Schocher }
679*ff94bc40SHeiko Schocher 
680*ff94bc40SHeiko Schocher int ubi_wl_get_peb(struct ubi_device *ubi)
681*ff94bc40SHeiko Schocher {
682*ff94bc40SHeiko Schocher 	int peb, err;
683*ff94bc40SHeiko Schocher 
684*ff94bc40SHeiko Schocher 	spin_lock(&ubi->wl_lock);
685*ff94bc40SHeiko Schocher 	peb = __wl_get_peb(ubi);
686*ff94bc40SHeiko Schocher 	spin_unlock(&ubi->wl_lock);
687*ff94bc40SHeiko Schocher 
688*ff94bc40SHeiko Schocher 	err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
689*ff94bc40SHeiko Schocher 				    ubi->peb_size - ubi->vid_hdr_aloffset);
690*ff94bc40SHeiko Schocher 	if (err) {
691*ff94bc40SHeiko Schocher 		ubi_err("new PEB %d does not contain all 0xFF bytes", peb);
692*ff94bc40SHeiko Schocher 		return err;
693*ff94bc40SHeiko Schocher 	}
694*ff94bc40SHeiko Schocher 
695*ff94bc40SHeiko Schocher 	return peb;
696*ff94bc40SHeiko Schocher }
697*ff94bc40SHeiko Schocher #endif
698*ff94bc40SHeiko Schocher 
699*ff94bc40SHeiko Schocher /**
700*ff94bc40SHeiko Schocher  * prot_queue_del - remove a physical eraseblock from the protection queue.
701c91a719dSKyungmin Park  * @ubi: UBI device description object
702c91a719dSKyungmin Park  * @pnum: the physical eraseblock to remove
703c91a719dSKyungmin Park  *
704*ff94bc40SHeiko Schocher  * This function deletes PEB @pnum from the protection queue and returns zero
705*ff94bc40SHeiko Schocher  * in case of success and %-ENODEV if the PEB was not found.
706c91a719dSKyungmin Park  */
707*ff94bc40SHeiko Schocher static int prot_queue_del(struct ubi_device *ubi, int pnum)
708c91a719dSKyungmin Park {
709*ff94bc40SHeiko Schocher 	struct ubi_wl_entry *e;
710c91a719dSKyungmin Park 
711*ff94bc40SHeiko Schocher 	e = ubi->lookuptbl[pnum];
712*ff94bc40SHeiko Schocher 	if (!e)
713c91a719dSKyungmin Park 		return -ENODEV;
714c91a719dSKyungmin Park 
715*ff94bc40SHeiko Schocher 	if (self_check_in_pq(ubi, e))
716*ff94bc40SHeiko Schocher 		return -ENODEV;
717*ff94bc40SHeiko Schocher 
718*ff94bc40SHeiko Schocher 	list_del(&e->u.list);
719*ff94bc40SHeiko Schocher 	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
720c91a719dSKyungmin Park 	return 0;
721c91a719dSKyungmin Park }
722c91a719dSKyungmin Park 
723c91a719dSKyungmin Park /**
724c91a719dSKyungmin Park  * sync_erase - synchronously erase a physical eraseblock.
725c91a719dSKyungmin Park  * @ubi: UBI device description object
726c91a719dSKyungmin Park  * @e: the physical eraseblock to erase
727c91a719dSKyungmin Park  * @torture: if the physical eraseblock has to be tortured
728c91a719dSKyungmin Park  *
729c91a719dSKyungmin Park  * This function returns zero in case of success and a negative error code in
730c91a719dSKyungmin Park  * case of failure.
731c91a719dSKyungmin Park  */
732*ff94bc40SHeiko Schocher static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
733*ff94bc40SHeiko Schocher 		      int torture)
734c91a719dSKyungmin Park {
735c91a719dSKyungmin Park 	int err;
736c91a719dSKyungmin Park 	struct ubi_ec_hdr *ec_hdr;
737c91a719dSKyungmin Park 	unsigned long long ec = e->ec;
738c91a719dSKyungmin Park 
739c91a719dSKyungmin Park 	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
740c91a719dSKyungmin Park 
741*ff94bc40SHeiko Schocher 	err = self_check_ec(ubi, e->pnum, e->ec);
742*ff94bc40SHeiko Schocher 	if (err)
743c91a719dSKyungmin Park 		return -EINVAL;
744c91a719dSKyungmin Park 
745c91a719dSKyungmin Park 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
746c91a719dSKyungmin Park 	if (!ec_hdr)
747c91a719dSKyungmin Park 		return -ENOMEM;
748c91a719dSKyungmin Park 
749c91a719dSKyungmin Park 	err = ubi_io_sync_erase(ubi, e->pnum, torture);
750c91a719dSKyungmin Park 	if (err < 0)
751c91a719dSKyungmin Park 		goto out_free;
752c91a719dSKyungmin Park 
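	/*
	 * On success ubi_io_sync_erase() returns the number of erase
	 * operations it performed (more than one if the PEB was tortured),
	 * so advance the erase counter by that amount.
	 */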
753c91a719dSKyungmin Park 	ec += err;
754c91a719dSKyungmin Park 	if (ec > UBI_MAX_ERASECOUNTER) {
755c91a719dSKyungmin Park 		/*
756c91a719dSKyungmin Park 		 * Erase counter overflow. Upgrade UBI and use 64-bit
757c91a719dSKyungmin Park 		 * erase counters internally.
758c91a719dSKyungmin Park 		 */
759c91a719dSKyungmin Park 		ubi_err("erase counter overflow at PEB %d, EC %llu",
760c91a719dSKyungmin Park 			e->pnum, ec);
761c91a719dSKyungmin Park 		err = -EINVAL;
762c91a719dSKyungmin Park 		goto out_free;
763c91a719dSKyungmin Park 	}
764c91a719dSKyungmin Park 
765c91a719dSKyungmin Park 	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
766c91a719dSKyungmin Park 
767c91a719dSKyungmin Park 	ec_hdr->ec = cpu_to_be64(ec);
768c91a719dSKyungmin Park 
769c91a719dSKyungmin Park 	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
770c91a719dSKyungmin Park 	if (err)
771c91a719dSKyungmin Park 		goto out_free;
772c91a719dSKyungmin Park 
773c91a719dSKyungmin Park 	e->ec = ec;
774c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
775c91a719dSKyungmin Park 	if (e->ec > ubi->max_ec)
776c91a719dSKyungmin Park 		ubi->max_ec = e->ec;
777c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
778c91a719dSKyungmin Park 
779c91a719dSKyungmin Park out_free:
780c91a719dSKyungmin Park 	kfree(ec_hdr);
781c91a719dSKyungmin Park 	return err;
782c91a719dSKyungmin Park }
783c91a719dSKyungmin Park 
784c91a719dSKyungmin Park /**
785*ff94bc40SHeiko Schocher  * serve_prot_queue - check if it is time to stop protecting PEBs.
786c91a719dSKyungmin Park  * @ubi: UBI device description object
787c91a719dSKyungmin Park  *
788*ff94bc40SHeiko Schocher  * This function is called after each erase operation and removes PEBs from the
789*ff94bc40SHeiko Schocher  * tail of the protection queue. These PEBs have been protected for long enough
790*ff94bc40SHeiko Schocher  * and should be moved to the used tree.
791c91a719dSKyungmin Park  */
792*ff94bc40SHeiko Schocher static void serve_prot_queue(struct ubi_device *ubi)
793c91a719dSKyungmin Park {
794*ff94bc40SHeiko Schocher 	struct ubi_wl_entry *e, *tmp;
795*ff94bc40SHeiko Schocher 	int count;
796c91a719dSKyungmin Park 
797c91a719dSKyungmin Park 	/*
798c91a719dSKyungmin Park 	 * There may be several protected physical eraseblocks to remove,
799c91a719dSKyungmin Park 	 * process them all.
800c91a719dSKyungmin Park 	 */
801*ff94bc40SHeiko Schocher repeat:
802*ff94bc40SHeiko Schocher 	count = 0;
803c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
804*ff94bc40SHeiko Schocher 	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
805*ff94bc40SHeiko Schocher 		dbg_wl("PEB %d EC %d protection over, move to used tree",
806*ff94bc40SHeiko Schocher 			e->pnum, e->ec);
807*ff94bc40SHeiko Schocher 
808*ff94bc40SHeiko Schocher 		list_del(&e->u.list);
809*ff94bc40SHeiko Schocher 		wl_tree_add(e, &ubi->used);
810*ff94bc40SHeiko Schocher 		if (count++ > 32) {
811*ff94bc40SHeiko Schocher 			/*
812*ff94bc40SHeiko Schocher 			 * Let's be nice and avoid holding the spinlock for
813*ff94bc40SHeiko Schocher 			 * too long.
814*ff94bc40SHeiko Schocher 			 */
815c91a719dSKyungmin Park 			spin_unlock(&ubi->wl_lock);
816c91a719dSKyungmin Park 			cond_resched();
817*ff94bc40SHeiko Schocher 			goto repeat;
818c91a719dSKyungmin Park 		}
819c91a719dSKyungmin Park 	}
820c91a719dSKyungmin Park 
821*ff94bc40SHeiko Schocher 	ubi->pq_head += 1;
822*ff94bc40SHeiko Schocher 	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
823*ff94bc40SHeiko Schocher 		ubi->pq_head = 0;
824*ff94bc40SHeiko Schocher 	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
825*ff94bc40SHeiko Schocher 	spin_unlock(&ubi->wl_lock);
826*ff94bc40SHeiko Schocher }
827*ff94bc40SHeiko Schocher 
828*ff94bc40SHeiko Schocher /**
829*ff94bc40SHeiko Schocher  * __schedule_ubi_work - schedule a work.
830*ff94bc40SHeiko Schocher  * @ubi: UBI device description object
831*ff94bc40SHeiko Schocher  * @wrk: the work to schedule
832*ff94bc40SHeiko Schocher  *
833*ff94bc40SHeiko Schocher  * This function adds a work defined by @wrk to the tail of the pending works
834*ff94bc40SHeiko Schocher  * list. Can only be used if ubi->work_sem is already held in read mode!
835*ff94bc40SHeiko Schocher  */
836*ff94bc40SHeiko Schocher static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
837*ff94bc40SHeiko Schocher {
838*ff94bc40SHeiko Schocher 	spin_lock(&ubi->wl_lock);
839*ff94bc40SHeiko Schocher 	list_add_tail(&wrk->list, &ubi->works);
840*ff94bc40SHeiko Schocher 	ubi_assert(ubi->works_count >= 0);
841*ff94bc40SHeiko Schocher 	ubi->works_count += 1;
842*ff94bc40SHeiko Schocher #ifndef __UBOOT__
843*ff94bc40SHeiko Schocher 	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
844*ff94bc40SHeiko Schocher 		wake_up_process(ubi->bgt_thread);
845*ff94bc40SHeiko Schocher #else
846*ff94bc40SHeiko Schocher 	/*
847*ff94bc40SHeiko Schocher 	 * U-Boot special: We have no bgt_thread in U-Boot!
848*ff94bc40SHeiko Schocher 	 * So just call do_work() here directly.
849*ff94bc40SHeiko Schocher 	 */
850*ff94bc40SHeiko Schocher 	do_work(ubi);
851*ff94bc40SHeiko Schocher #endif
852*ff94bc40SHeiko Schocher 	spin_unlock(&ubi->wl_lock);
853*ff94bc40SHeiko Schocher }
854*ff94bc40SHeiko Schocher 
855c91a719dSKyungmin Park /**
856c91a719dSKyungmin Park  * schedule_ubi_work - schedule a work.
857c91a719dSKyungmin Park  * @ubi: UBI device description object
858c91a719dSKyungmin Park  * @wrk: the work to schedule
859c91a719dSKyungmin Park  *
860*ff94bc40SHeiko Schocher  * This function adds a work defined by @wrk to the tail of the pending works
861*ff94bc40SHeiko Schocher  * list.
862c91a719dSKyungmin Park  */
863c91a719dSKyungmin Park static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
864c91a719dSKyungmin Park {
865*ff94bc40SHeiko Schocher 	down_read(&ubi->work_sem);
866*ff94bc40SHeiko Schocher 	__schedule_ubi_work(ubi, wrk);
867*ff94bc40SHeiko Schocher 	up_read(&ubi->work_sem);
868c91a719dSKyungmin Park }
869c91a719dSKyungmin Park 
870c91a719dSKyungmin Park static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
871c91a719dSKyungmin Park 			int cancel);
872c91a719dSKyungmin Park 
873*ff94bc40SHeiko Schocher #ifdef CONFIG_MTD_UBI_FASTMAP
874*ff94bc40SHeiko Schocher /**
875*ff94bc40SHeiko Schocher  * ubi_is_erase_work - checks whether a work is erase work.
876*ff94bc40SHeiko Schocher  * @wrk: The work object to be checked
877*ff94bc40SHeiko Schocher  */
878*ff94bc40SHeiko Schocher int ubi_is_erase_work(struct ubi_work *wrk)
879*ff94bc40SHeiko Schocher {
880*ff94bc40SHeiko Schocher 	return wrk->func == erase_worker;
881*ff94bc40SHeiko Schocher }
882*ff94bc40SHeiko Schocher #endif
883*ff94bc40SHeiko Schocher 
884c91a719dSKyungmin Park /**
885c91a719dSKyungmin Park  * schedule_erase - schedule an erase work.
886c91a719dSKyungmin Park  * @ubi: UBI device description object
887c91a719dSKyungmin Park  * @e: the WL entry of the physical eraseblock to erase
888*ff94bc40SHeiko Schocher  * @vol_id: the volume ID that last used this PEB
889*ff94bc40SHeiko Schocher  * @lnum: the last used logical eraseblock number for the PEB
890c91a719dSKyungmin Park  * @torture: if the physical eraseblock has to be tortured
891c91a719dSKyungmin Park  *
892c91a719dSKyungmin Park  * This function returns zero in case of success and %-ENOMEM in case of
893c91a719dSKyungmin Park  * failure.
894c91a719dSKyungmin Park  */
895c91a719dSKyungmin Park static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
896*ff94bc40SHeiko Schocher 			  int vol_id, int lnum, int torture)
897c91a719dSKyungmin Park {
898c91a719dSKyungmin Park 	struct ubi_work *wl_wrk;
899c91a719dSKyungmin Park 
900*ff94bc40SHeiko Schocher 	ubi_assert(e);
901*ff94bc40SHeiko Schocher 	ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
902*ff94bc40SHeiko Schocher 
903c91a719dSKyungmin Park 	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
904c91a719dSKyungmin Park 	       e->pnum, e->ec, torture);
905c91a719dSKyungmin Park 
906c91a719dSKyungmin Park 	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
907c91a719dSKyungmin Park 	if (!wl_wrk)
908c91a719dSKyungmin Park 		return -ENOMEM;
909c91a719dSKyungmin Park 
910c91a719dSKyungmin Park 	wl_wrk->func = &erase_worker;
911c91a719dSKyungmin Park 	wl_wrk->e = e;
912*ff94bc40SHeiko Schocher 	wl_wrk->vol_id = vol_id;
913*ff94bc40SHeiko Schocher 	wl_wrk->lnum = lnum;
914c91a719dSKyungmin Park 	wl_wrk->torture = torture;
915c91a719dSKyungmin Park 
916c91a719dSKyungmin Park 	schedule_ubi_work(ubi, wl_wrk);
917c91a719dSKyungmin Park 	return 0;
918c91a719dSKyungmin Park }
919c91a719dSKyungmin Park 
920c91a719dSKyungmin Park /**
921*ff94bc40SHeiko Schocher  * do_sync_erase - run the erase worker synchronously.
922*ff94bc40SHeiko Schocher  * @ubi: UBI device description object
923*ff94bc40SHeiko Schocher  * @e: the WL entry of the physical eraseblock to erase
924*ff94bc40SHeiko Schocher  * @vol_id: the volume ID that last used this PEB
925*ff94bc40SHeiko Schocher  * @lnum: the last used logical eraseblock number for the PEB
926*ff94bc40SHeiko Schocher  * @torture: if the physical eraseblock has to be tortured
927*ff94bc40SHeiko Schocher  *
928*ff94bc40SHeiko Schocher  */
929*ff94bc40SHeiko Schocher static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
930*ff94bc40SHeiko Schocher 			 int vol_id, int lnum, int torture)
931*ff94bc40SHeiko Schocher {
932*ff94bc40SHeiko Schocher 	struct ubi_work *wl_wrk;
933*ff94bc40SHeiko Schocher 
934*ff94bc40SHeiko Schocher 	dbg_wl("sync erase of PEB %i", e->pnum);
935*ff94bc40SHeiko Schocher 
936*ff94bc40SHeiko Schocher 	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
937*ff94bc40SHeiko Schocher 	if (!wl_wrk)
938*ff94bc40SHeiko Schocher 		return -ENOMEM;
939*ff94bc40SHeiko Schocher 
940*ff94bc40SHeiko Schocher 	wl_wrk->e = e;
941*ff94bc40SHeiko Schocher 	wl_wrk->vol_id = vol_id;
942*ff94bc40SHeiko Schocher 	wl_wrk->lnum = lnum;
943*ff94bc40SHeiko Schocher 	wl_wrk->torture = torture;
944*ff94bc40SHeiko Schocher 
945*ff94bc40SHeiko Schocher 	return erase_worker(ubi, wl_wrk, 0);
946*ff94bc40SHeiko Schocher }
947*ff94bc40SHeiko Schocher 
948*ff94bc40SHeiko Schocher #ifdef CONFIG_MTD_UBI_FASTMAP
949*ff94bc40SHeiko Schocher /**
950*ff94bc40SHeiko Schocher  * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
951*ff94bc40SHeiko Schocher  * sub-system.
952*ff94bc40SHeiko Schocher  * see: ubi_wl_put_peb()
953*ff94bc40SHeiko Schocher  *
954*ff94bc40SHeiko Schocher  * @ubi: UBI device description object
955*ff94bc40SHeiko Schocher  * @fm_e: physical eraseblock to return
956*ff94bc40SHeiko Schocher  * @lnum: the last used logical eraseblock number for the PEB
957*ff94bc40SHeiko Schocher  * @torture: if this physical eraseblock has to be tortured
958*ff94bc40SHeiko Schocher  */
959*ff94bc40SHeiko Schocher int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
960*ff94bc40SHeiko Schocher 		      int lnum, int torture)
961*ff94bc40SHeiko Schocher {
962*ff94bc40SHeiko Schocher 	struct ubi_wl_entry *e;
963*ff94bc40SHeiko Schocher 	int vol_id, pnum = fm_e->pnum;
964*ff94bc40SHeiko Schocher 
965*ff94bc40SHeiko Schocher 	dbg_wl("PEB %d", pnum);
966*ff94bc40SHeiko Schocher 
967*ff94bc40SHeiko Schocher 	ubi_assert(pnum >= 0);
968*ff94bc40SHeiko Schocher 	ubi_assert(pnum < ubi->peb_count);
969*ff94bc40SHeiko Schocher 
970*ff94bc40SHeiko Schocher 	spin_lock(&ubi->wl_lock);
971*ff94bc40SHeiko Schocher 	e = ubi->lookuptbl[pnum];
972*ff94bc40SHeiko Schocher 
973*ff94bc40SHeiko Schocher 	/* This can happen if we recovered from a fastmap for the very
974*ff94bc40SHeiko Schocher 	 * first time and are now writing a new one. In this case the wl system
975*ff94bc40SHeiko Schocher 	 * has never seen any PEB used by the original fastmap.
976*ff94bc40SHeiko Schocher 	 */
977*ff94bc40SHeiko Schocher 	if (!e) {
978*ff94bc40SHeiko Schocher 		e = fm_e;
979*ff94bc40SHeiko Schocher 		ubi_assert(e->ec >= 0);
980*ff94bc40SHeiko Schocher 		ubi->lookuptbl[pnum] = e;
981*ff94bc40SHeiko Schocher 	} else {
982*ff94bc40SHeiko Schocher 		e->ec = fm_e->ec;
983*ff94bc40SHeiko Schocher 		kfree(fm_e);
984*ff94bc40SHeiko Schocher 	}
985*ff94bc40SHeiko Schocher 
986*ff94bc40SHeiko Schocher 	spin_unlock(&ubi->wl_lock);
987*ff94bc40SHeiko Schocher 
988*ff94bc40SHeiko Schocher 	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
989*ff94bc40SHeiko Schocher 	return schedule_erase(ubi, e, vol_id, lnum, torture);
990*ff94bc40SHeiko Schocher }
991*ff94bc40SHeiko Schocher #endif
992*ff94bc40SHeiko Schocher 
993*ff94bc40SHeiko Schocher /**
994c91a719dSKyungmin Park  * wear_leveling_worker - wear-leveling worker function.
995c91a719dSKyungmin Park  * @ubi: UBI device description object
996c91a719dSKyungmin Park  * @wrk: the work object
997c91a719dSKyungmin Park  * @cancel: non-zero if the worker has to free memory and exit
998c91a719dSKyungmin Park  *
999c91a719dSKyungmin Park  * This function copies a more worn out physical eraseblock to a less worn out
1000c91a719dSKyungmin Park  * one. Returns zero in case of success and a negative error code in case of
1001c91a719dSKyungmin Park  * failure.
1002c91a719dSKyungmin Park  */
1003c91a719dSKyungmin Park static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1004c91a719dSKyungmin Park 				int cancel)
1005c91a719dSKyungmin Park {
1006*ff94bc40SHeiko Schocher 	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
1007*ff94bc40SHeiko Schocher 	int vol_id = -1, uninitialized_var(lnum);
1008*ff94bc40SHeiko Schocher #ifdef CONFIG_MTD_UBI_FASTMAP
1009*ff94bc40SHeiko Schocher 	int anchor = wrk->anchor;
1010*ff94bc40SHeiko Schocher #endif
1011c91a719dSKyungmin Park 	struct ubi_wl_entry *e1, *e2;
1012c91a719dSKyungmin Park 	struct ubi_vid_hdr *vid_hdr;
1013c91a719dSKyungmin Park 
1014c91a719dSKyungmin Park 	kfree(wrk);
1015c91a719dSKyungmin Park 	if (cancel)
1016c91a719dSKyungmin Park 		return 0;
1017c91a719dSKyungmin Park 
1018c91a719dSKyungmin Park 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
1019c91a719dSKyungmin Park 	if (!vid_hdr)
1020c91a719dSKyungmin Park 		return -ENOMEM;
1021c91a719dSKyungmin Park 
1022c91a719dSKyungmin Park 	mutex_lock(&ubi->move_mutex);
1023c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
1024c91a719dSKyungmin Park 	ubi_assert(!ubi->move_from && !ubi->move_to);
1025c91a719dSKyungmin Park 	ubi_assert(!ubi->move_to_put);
1026c91a719dSKyungmin Park 
1027c91a719dSKyungmin Park 	if (!ubi->free.rb_node ||
1028c91a719dSKyungmin Park 	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
1029c91a719dSKyungmin Park 		/*
1030c91a719dSKyungmin Park 		 * No free physical eraseblocks? Well, they must be waiting in
1031c91a719dSKyungmin Park 		 * the queue to be erased. Cancel movement - it will be
1032c91a719dSKyungmin Park 		 * triggered again when a free physical eraseblock appears.
1033c91a719dSKyungmin Park 		 *
1034c91a719dSKyungmin Park 		 * No used physical eraseblocks? They must be temporarily
1035c91a719dSKyungmin Park 		 * protected from being moved. They will be moved to the
1036c91a719dSKyungmin Park 		 * @ubi->used tree later and the wear-leveling will be
1037c91a719dSKyungmin Park 		 * triggered again.
1038c91a719dSKyungmin Park 		 */
1039c91a719dSKyungmin Park 		dbg_wl("cancel WL, a list is empty: free %d, used %d",
1040c91a719dSKyungmin Park 		       !ubi->free.rb_node, !ubi->used.rb_node);
1041c91a719dSKyungmin Park 		goto out_cancel;
1042c91a719dSKyungmin Park 	}
1043c91a719dSKyungmin Park 
1044*ff94bc40SHeiko Schocher #ifdef CONFIG_MTD_UBI_FASTMAP
1045*ff94bc40SHeiko Schocher 	/* Check whether we need to produce an anchor PEB */
1046*ff94bc40SHeiko Schocher 	if (!anchor)
1047*ff94bc40SHeiko Schocher 		anchor = !anchor_pebs_avalible(&ubi->free);
1048*ff94bc40SHeiko Schocher 
1049*ff94bc40SHeiko Schocher 	if (anchor) {
1050*ff94bc40SHeiko Schocher 		e1 = find_anchor_wl_entry(&ubi->used);
1051*ff94bc40SHeiko Schocher 		if (!e1)
1052*ff94bc40SHeiko Schocher 			goto out_cancel;
1053*ff94bc40SHeiko Schocher 		e2 = get_peb_for_wl(ubi);
1054*ff94bc40SHeiko Schocher 		if (!e2)
1055*ff94bc40SHeiko Schocher 			goto out_cancel;
1056*ff94bc40SHeiko Schocher 
1057*ff94bc40SHeiko Schocher 		self_check_in_wl_tree(ubi, e1, &ubi->used);
1058*ff94bc40SHeiko Schocher 		rb_erase(&e1->u.rb, &ubi->used);
1059*ff94bc40SHeiko Schocher 		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
1060*ff94bc40SHeiko Schocher 	} else if (!ubi->scrub.rb_node) {
1061*ff94bc40SHeiko Schocher #else
1062c91a719dSKyungmin Park 	if (!ubi->scrub.rb_node) {
1063*ff94bc40SHeiko Schocher #endif
1064c91a719dSKyungmin Park 		/*
1065c91a719dSKyungmin Park 		 * Now pick the least worn-out used physical eraseblock and a
1066c91a719dSKyungmin Park 		 * highly worn-out free physical eraseblock. If the erase
1067c91a719dSKyungmin Park 		 * counters differ enough, start wear-leveling.
1068c91a719dSKyungmin Park 		 */
1069*ff94bc40SHeiko Schocher 		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1070*ff94bc40SHeiko Schocher 		e2 = get_peb_for_wl(ubi);
1071*ff94bc40SHeiko Schocher 		if (!e2)
1072*ff94bc40SHeiko Schocher 			goto out_cancel;
1073c91a719dSKyungmin Park 
1074c91a719dSKyungmin Park 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
1075c91a719dSKyungmin Park 			dbg_wl("no WL needed: min used EC %d, max free EC %d",
1076c91a719dSKyungmin Park 			       e1->ec, e2->ec);
1077*ff94bc40SHeiko Schocher 
1078*ff94bc40SHeiko Schocher 			/* Give the unused PEB back */
1079*ff94bc40SHeiko Schocher 			wl_tree_add(e2, &ubi->free);
1080c91a719dSKyungmin Park 			goto out_cancel;
1081c91a719dSKyungmin Park 		}
1082*ff94bc40SHeiko Schocher 		self_check_in_wl_tree(ubi, e1, &ubi->used);
1083*ff94bc40SHeiko Schocher 		rb_erase(&e1->u.rb, &ubi->used);
1084c91a719dSKyungmin Park 		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
1085c91a719dSKyungmin Park 		       e1->pnum, e1->ec, e2->pnum, e2->ec);
1086c91a719dSKyungmin Park 	} else {
1087c91a719dSKyungmin Park 		/* Perform scrubbing */
1088c91a719dSKyungmin Park 		scrubbing = 1;
1089*ff94bc40SHeiko Schocher 		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
1090*ff94bc40SHeiko Schocher 		e2 = get_peb_for_wl(ubi);
1091*ff94bc40SHeiko Schocher 		if (!e2)
1092*ff94bc40SHeiko Schocher 			goto out_cancel;
1093*ff94bc40SHeiko Schocher 
1094*ff94bc40SHeiko Schocher 		self_check_in_wl_tree(ubi, e1, &ubi->scrub);
1095*ff94bc40SHeiko Schocher 		rb_erase(&e1->u.rb, &ubi->scrub);
1096c91a719dSKyungmin Park 		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
1097c91a719dSKyungmin Park 	}
1098c91a719dSKyungmin Park 
1099c91a719dSKyungmin Park 	ubi->move_from = e1;
1100c91a719dSKyungmin Park 	ubi->move_to = e2;
1101c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1102c91a719dSKyungmin Park 
1103c91a719dSKyungmin Park 	/*
1104c91a719dSKyungmin Park 	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
1105c91a719dSKyungmin Park 	 * We do not yet know which logical eraseblock our physical
1106c91a719dSKyungmin Park 	 * eraseblock (@e1) belongs to. We have to read the volume identifier
1107c91a719dSKyungmin Park 	 * header first.
1108c91a719dSKyungmin Park 	 *
1109c91a719dSKyungmin Park 	 * Note, we are protected from this PEB being unmapped and erased. The
1110c91a719dSKyungmin Park 	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
1111c91a719dSKyungmin Park 	 * which is being moved was unmapped.
1112c91a719dSKyungmin Park 	 */
1113c91a719dSKyungmin Park 
1114c91a719dSKyungmin Park 	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
1115c91a719dSKyungmin Park 	if (err && err != UBI_IO_BITFLIPS) {
1116*ff94bc40SHeiko Schocher 		if (err == UBI_IO_FF) {
1117c91a719dSKyungmin Park 			/*
1118c91a719dSKyungmin Park 			 * We are trying to move PEB without a VID header. UBI
1119c91a719dSKyungmin Park 			 * always writes VID headers shortly after the PEB was
1120*ff94bc40SHeiko Schocher 			 * given, so we have a situation when it has not yet
1121*ff94bc40SHeiko Schocher 			 * had a chance to write it, because it was preempted.
1122*ff94bc40SHeiko Schocher 			 * So add this PEB to the protection queue for now,
1123*ff94bc40SHeiko Schocher 			 * because presumably more data will be written there
1124*ff94bc40SHeiko Schocher 			 * (including the missing VID header), and then we'll
1125*ff94bc40SHeiko Schocher 			 * move it.
1126c91a719dSKyungmin Park 			 */
1127c91a719dSKyungmin Park 			dbg_wl("PEB %d has no VID header", e1->pnum);
1128*ff94bc40SHeiko Schocher 			protect = 1;
1129*ff94bc40SHeiko Schocher 			goto out_not_moved;
1130*ff94bc40SHeiko Schocher 		} else if (err == UBI_IO_FF_BITFLIPS) {
1131*ff94bc40SHeiko Schocher 			/*
1132*ff94bc40SHeiko Schocher 			 * The same situation as %UBI_IO_FF, but bit-flips were
1133*ff94bc40SHeiko Schocher 			 * detected. It is better to schedule this PEB for
1134*ff94bc40SHeiko Schocher 			 * scrubbing.
1135*ff94bc40SHeiko Schocher 			 */
1136*ff94bc40SHeiko Schocher 			dbg_wl("PEB %d has no VID header but has bit-flips",
1137*ff94bc40SHeiko Schocher 			       e1->pnum);
1138*ff94bc40SHeiko Schocher 			scrubbing = 1;
1139c91a719dSKyungmin Park 			goto out_not_moved;
1140c91a719dSKyungmin Park 		}
1141c91a719dSKyungmin Park 
1142c91a719dSKyungmin Park 		ubi_err("error %d while reading VID header from PEB %d",
1143c91a719dSKyungmin Park 			err, e1->pnum);
1144c91a719dSKyungmin Park 		goto out_error;
1145c91a719dSKyungmin Park 	}
1146c91a719dSKyungmin Park 
1147*ff94bc40SHeiko Schocher 	vol_id = be32_to_cpu(vid_hdr->vol_id);
1148*ff94bc40SHeiko Schocher 	lnum = be32_to_cpu(vid_hdr->lnum);
1149*ff94bc40SHeiko Schocher 
1150c91a719dSKyungmin Park 	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
1151c91a719dSKyungmin Park 	if (err) {
1152*ff94bc40SHeiko Schocher 		if (err == MOVE_CANCEL_RACE) {
1153*ff94bc40SHeiko Schocher 			/*
1154*ff94bc40SHeiko Schocher 			 * The LEB has not been moved because the volume is
1155*ff94bc40SHeiko Schocher 			 * being deleted or the PEB has been put meanwhile. We
1156*ff94bc40SHeiko Schocher 			 * should prevent this PEB from being selected for
1157*ff94bc40SHeiko Schocher 			 * wear-leveling movement again, so put it to the
1158*ff94bc40SHeiko Schocher 			 * protection queue.
1159*ff94bc40SHeiko Schocher 			 */
1160*ff94bc40SHeiko Schocher 			protect = 1;
1161*ff94bc40SHeiko Schocher 			goto out_not_moved;
1162*ff94bc40SHeiko Schocher 		}
1163*ff94bc40SHeiko Schocher 		if (err == MOVE_RETRY) {
1164*ff94bc40SHeiko Schocher 			scrubbing = 1;
1165*ff94bc40SHeiko Schocher 			goto out_not_moved;
1166*ff94bc40SHeiko Schocher 		}
1167*ff94bc40SHeiko Schocher 		if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
1168*ff94bc40SHeiko Schocher 		    err == MOVE_TARGET_RD_ERR) {
1169*ff94bc40SHeiko Schocher 			/*
1170*ff94bc40SHeiko Schocher 			 * Target PEB had bit-flips or write error - torture it.
1171*ff94bc40SHeiko Schocher 			 */
1172*ff94bc40SHeiko Schocher 			torture = 1;
1173*ff94bc40SHeiko Schocher 			goto out_not_moved;
1174*ff94bc40SHeiko Schocher 		}
1175*ff94bc40SHeiko Schocher 
1176*ff94bc40SHeiko Schocher 		if (err == MOVE_SOURCE_RD_ERR) {
1177*ff94bc40SHeiko Schocher 			/*
1178*ff94bc40SHeiko Schocher 			 * An error happened while reading the source PEB. Do
1179*ff94bc40SHeiko Schocher 			 * not switch to R/O mode in this case, and give the
1180*ff94bc40SHeiko Schocher 			 * upper layers a possibility to recover from this,
1181*ff94bc40SHeiko Schocher 			 * e.g. by unmapping corresponding LEB. Instead, just
1182*ff94bc40SHeiko Schocher 			 * e.g. by unmapping the corresponding LEB. Instead, just
1183*ff94bc40SHeiko Schocher 			 * UBI from trying to move it over and over again.
1184*ff94bc40SHeiko Schocher 			 */
1185*ff94bc40SHeiko Schocher 			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
1186*ff94bc40SHeiko Schocher 				ubi_err("too many erroneous eraseblocks (%d)",
1187*ff94bc40SHeiko Schocher 					ubi->erroneous_peb_count);
1188*ff94bc40SHeiko Schocher 				goto out_error;
1189*ff94bc40SHeiko Schocher 			}
1190*ff94bc40SHeiko Schocher 			erroneous = 1;
1191*ff94bc40SHeiko Schocher 			goto out_not_moved;
1192*ff94bc40SHeiko Schocher 		}
1193c91a719dSKyungmin Park 
1194c91a719dSKyungmin Park 		if (err < 0)
1195c91a719dSKyungmin Park 			goto out_error;
1196c91a719dSKyungmin Park 
1197*ff94bc40SHeiko Schocher 		ubi_assert(0);
1198c91a719dSKyungmin Park 	}
1199c91a719dSKyungmin Park 
1200*ff94bc40SHeiko Schocher 	/* The PEB has been successfully moved */
1201*ff94bc40SHeiko Schocher 	if (scrubbing)
1202*ff94bc40SHeiko Schocher 		ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
1203*ff94bc40SHeiko Schocher 			e1->pnum, vol_id, lnum, e2->pnum);
1204c91a719dSKyungmin Park 	ubi_free_vid_hdr(ubi, vid_hdr);
1205*ff94bc40SHeiko Schocher 
1206c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
1207*ff94bc40SHeiko Schocher 	if (!ubi->move_to_put) {
1208c91a719dSKyungmin Park 		wl_tree_add(e2, &ubi->used);
1209*ff94bc40SHeiko Schocher 		e2 = NULL;
1210*ff94bc40SHeiko Schocher 	}
1211c91a719dSKyungmin Park 	ubi->move_from = ubi->move_to = NULL;
1212c91a719dSKyungmin Park 	ubi->move_to_put = ubi->wl_scheduled = 0;
1213c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1214c91a719dSKyungmin Park 
1215*ff94bc40SHeiko Schocher 	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
1216*ff94bc40SHeiko Schocher 	if (err) {
1217*ff94bc40SHeiko Schocher 		kmem_cache_free(ubi_wl_entry_slab, e1);
1218*ff94bc40SHeiko Schocher 		if (e2)
1219*ff94bc40SHeiko Schocher 			kmem_cache_free(ubi_wl_entry_slab, e2);
1220*ff94bc40SHeiko Schocher 		goto out_ro;
1221*ff94bc40SHeiko Schocher 	}
1222*ff94bc40SHeiko Schocher 
1223*ff94bc40SHeiko Schocher 	if (e2) {
1224c91a719dSKyungmin Park 		/*
1225c91a719dSKyungmin Park 		 * Well, the target PEB was put meanwhile, schedule it for
1226c91a719dSKyungmin Park 		 * erasure.
1227c91a719dSKyungmin Park 		 */
1228*ff94bc40SHeiko Schocher 		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
1229*ff94bc40SHeiko Schocher 		       e2->pnum, vol_id, lnum);
1230*ff94bc40SHeiko Schocher 		err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
1231*ff94bc40SHeiko Schocher 		if (err) {
1232*ff94bc40SHeiko Schocher 			kmem_cache_free(ubi_wl_entry_slab, e2);
1233*ff94bc40SHeiko Schocher 			goto out_ro;
1234c91a719dSKyungmin Park 		}
1235c91a719dSKyungmin Park 	}
1236c91a719dSKyungmin Park 
1237c91a719dSKyungmin Park 	dbg_wl("done");
1238c91a719dSKyungmin Park 	mutex_unlock(&ubi->move_mutex);
1239c91a719dSKyungmin Park 	return 0;
1240c91a719dSKyungmin Park 
1241c91a719dSKyungmin Park 	/*
1242c91a719dSKyungmin Park 	 * For some reason the LEB was not moved; it might be an error or
1243c91a719dSKyungmin Park 	 * something else. @e1 was not changed, so put it back. @e2 might
1244*ff94bc40SHeiko Schocher 	 * have been changed, schedule it for erasure.
1245c91a719dSKyungmin Park 	 */
1246c91a719dSKyungmin Park out_not_moved:
1247*ff94bc40SHeiko Schocher 	if (vol_id != -1)
1248*ff94bc40SHeiko Schocher 		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
1249*ff94bc40SHeiko Schocher 		       e1->pnum, vol_id, lnum, e2->pnum, err);
1250*ff94bc40SHeiko Schocher 	else
1251*ff94bc40SHeiko Schocher 		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
1252*ff94bc40SHeiko Schocher 		       e1->pnum, e2->pnum, err);
1253c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
1254*ff94bc40SHeiko Schocher 	if (protect)
1255*ff94bc40SHeiko Schocher 		prot_queue_add(ubi, e1);
1256*ff94bc40SHeiko Schocher 	else if (erroneous) {
1257*ff94bc40SHeiko Schocher 		wl_tree_add(e1, &ubi->erroneous);
1258*ff94bc40SHeiko Schocher 		ubi->erroneous_peb_count += 1;
1259*ff94bc40SHeiko Schocher 	} else if (scrubbing)
1260c91a719dSKyungmin Park 		wl_tree_add(e1, &ubi->scrub);
1261c91a719dSKyungmin Park 	else
1262c91a719dSKyungmin Park 		wl_tree_add(e1, &ubi->used);
1263*ff94bc40SHeiko Schocher 	ubi_assert(!ubi->move_to_put);
1264c91a719dSKyungmin Park 	ubi->move_from = ubi->move_to = NULL;
1265*ff94bc40SHeiko Schocher 	ubi->wl_scheduled = 0;
1266c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1267c91a719dSKyungmin Park 
1268*ff94bc40SHeiko Schocher 	ubi_free_vid_hdr(ubi, vid_hdr);
1269*ff94bc40SHeiko Schocher 	err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
1270*ff94bc40SHeiko Schocher 	if (err) {
1271*ff94bc40SHeiko Schocher 		kmem_cache_free(ubi_wl_entry_slab, e2);
1272*ff94bc40SHeiko Schocher 		goto out_ro;
1273*ff94bc40SHeiko Schocher 	}
1274c91a719dSKyungmin Park 	mutex_unlock(&ubi->move_mutex);
1275c91a719dSKyungmin Park 	return 0;
1276c91a719dSKyungmin Park 
1277c91a719dSKyungmin Park out_error:
1278*ff94bc40SHeiko Schocher 	if (vol_id != -1)
1279c91a719dSKyungmin Park 		ubi_err("error %d while moving PEB %d to PEB %d",
1280c91a719dSKyungmin Park 			err, e1->pnum, e2->pnum);
1281*ff94bc40SHeiko Schocher 	else
1282*ff94bc40SHeiko Schocher 		ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
1283*ff94bc40SHeiko Schocher 			err, e1->pnum, vol_id, lnum, e2->pnum);
1284c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
1285c91a719dSKyungmin Park 	ubi->move_from = ubi->move_to = NULL;
1286c91a719dSKyungmin Park 	ubi->move_to_put = ubi->wl_scheduled = 0;
1287c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1288c91a719dSKyungmin Park 
1289*ff94bc40SHeiko Schocher 	ubi_free_vid_hdr(ubi, vid_hdr);
1290c91a719dSKyungmin Park 	kmem_cache_free(ubi_wl_entry_slab, e1);
1291c91a719dSKyungmin Park 	kmem_cache_free(ubi_wl_entry_slab, e2);
1292c91a719dSKyungmin Park 
1293*ff94bc40SHeiko Schocher out_ro:
1294*ff94bc40SHeiko Schocher 	ubi_ro_mode(ubi);
1295c91a719dSKyungmin Park 	mutex_unlock(&ubi->move_mutex);
1296*ff94bc40SHeiko Schocher 	ubi_assert(err != 0);
1297*ff94bc40SHeiko Schocher 	return err < 0 ? err : -EIO;
1298c91a719dSKyungmin Park 
1299c91a719dSKyungmin Park out_cancel:
1300c91a719dSKyungmin Park 	ubi->wl_scheduled = 0;
1301c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1302c91a719dSKyungmin Park 	mutex_unlock(&ubi->move_mutex);
1303c91a719dSKyungmin Park 	ubi_free_vid_hdr(ubi, vid_hdr);
1304c91a719dSKyungmin Park 	return 0;
1305c91a719dSKyungmin Park }
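/*
 * Illustrative sketch (not part of the driver): the wear-leveling decision
 * above reduces to comparing the erase counters of the two candidate PEBs.
 * Assuming, purely for illustration, a threshold of 4096 (the real value of
 * %UBI_WL_THRESHOLD is configuration dependent):
 *
 *	e1 = least worn-out used PEB,   e.g. e1->ec = 100
 *	e2 = highly worn-out free PEB,  e.g. e2->ec = 5000
 *
 *	if (e2->ec - e1->ec >= UBI_WL_THRESHOLD)
 *		copy the contents of e1 to e2;	// 4900 >= 4096, so move
 *	else
 *		return e2 to the @ubi->free tree and cancel the move;
 */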
1306c91a719dSKyungmin Park 
1307c91a719dSKyungmin Park /**
1308c91a719dSKyungmin Park  * ensure_wear_leveling - schedule wear-leveling if it is needed.
1309c91a719dSKyungmin Park  * @ubi: UBI device description object
1310*ff94bc40SHeiko Schocher  * @nested: set to non-zero if this function is called from UBI worker
1311c91a719dSKyungmin Park  *
1312c91a719dSKyungmin Park  * This function checks if it is time to start wear-leveling and schedules it
1313c91a719dSKyungmin Park  * if yes. This function returns zero in case of success and a negative error
1314c91a719dSKyungmin Park  * code in case of failure.
1315c91a719dSKyungmin Park  */
1316*ff94bc40SHeiko Schocher static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
1317c91a719dSKyungmin Park {
1318c91a719dSKyungmin Park 	int err = 0;
1319c91a719dSKyungmin Park 	struct ubi_wl_entry *e1;
1320c91a719dSKyungmin Park 	struct ubi_wl_entry *e2;
1321c91a719dSKyungmin Park 	struct ubi_work *wrk;
1322c91a719dSKyungmin Park 
1323c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
1324c91a719dSKyungmin Park 	if (ubi->wl_scheduled)
1325c91a719dSKyungmin Park 		/* Wear-leveling is already in the work queue */
1326c91a719dSKyungmin Park 		goto out_unlock;
1327c91a719dSKyungmin Park 
1328c91a719dSKyungmin Park 	/*
1329c91a719dSKyungmin Park 	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
1330c91a719dSKyungmin Park 	 * WL worker has to be scheduled anyway.
1331c91a719dSKyungmin Park 	 */
1332c91a719dSKyungmin Park 	if (!ubi->scrub.rb_node) {
1333c91a719dSKyungmin Park 		if (!ubi->used.rb_node || !ubi->free.rb_node)
1334c91a719dSKyungmin Park 			/* No physical eraseblocks - no deal */
1335c91a719dSKyungmin Park 			goto out_unlock;
1336c91a719dSKyungmin Park 
1337c91a719dSKyungmin Park 		/*
1338c91a719dSKyungmin Park 		 * We schedule wear-leveling only if the difference between the
1339c91a719dSKyungmin Park 		 * lowest erase counter of used physical eraseblocks and a high
1340*ff94bc40SHeiko Schocher 		 * erase counter of free physical eraseblocks is greater than
1341c91a719dSKyungmin Park 		 * %UBI_WL_THRESHOLD.
1342c91a719dSKyungmin Park 		 */
1343*ff94bc40SHeiko Schocher 		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1344*ff94bc40SHeiko Schocher 		e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1345c91a719dSKyungmin Park 
1346c91a719dSKyungmin Park 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
1347c91a719dSKyungmin Park 			goto out_unlock;
1348c91a719dSKyungmin Park 		dbg_wl("schedule wear-leveling");
1349c91a719dSKyungmin Park 	} else
1350c91a719dSKyungmin Park 		dbg_wl("schedule scrubbing");
1351c91a719dSKyungmin Park 
1352c91a719dSKyungmin Park 	ubi->wl_scheduled = 1;
1353c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1354c91a719dSKyungmin Park 
1355c91a719dSKyungmin Park 	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1356c91a719dSKyungmin Park 	if (!wrk) {
1357c91a719dSKyungmin Park 		err = -ENOMEM;
1358c91a719dSKyungmin Park 		goto out_cancel;
1359c91a719dSKyungmin Park 	}
1360c91a719dSKyungmin Park 
1361*ff94bc40SHeiko Schocher 	wrk->anchor = 0;
1362c91a719dSKyungmin Park 	wrk->func = &wear_leveling_worker;
1363*ff94bc40SHeiko Schocher 	if (nested)
1364*ff94bc40SHeiko Schocher 		__schedule_ubi_work(ubi, wrk);
1365*ff94bc40SHeiko Schocher 	else
1366c91a719dSKyungmin Park 		schedule_ubi_work(ubi, wrk);
1367c91a719dSKyungmin Park 	return err;
1368c91a719dSKyungmin Park 
1369c91a719dSKyungmin Park out_cancel:
1370c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
1371c91a719dSKyungmin Park 	ubi->wl_scheduled = 0;
1372c91a719dSKyungmin Park out_unlock:
1373c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1374c91a719dSKyungmin Park 	return err;
1375c91a719dSKyungmin Park }
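/*
 * Usage sketch (based on the callers visible in this file): code running in
 * normal process context passes @nested = 0 so the work is queued through
 * 'schedule_ubi_work()', while code that already runs inside the UBI worker
 * passes @nested = 1 so '__schedule_ubi_work()' is used and the work
 * semaphore is not taken a second time:
 *
 *	err = ensure_wear_leveling(ubi, 0);	// e.g. from ubi_wl_scrub_peb()
 *	err = ensure_wear_leveling(ubi, 1);	// e.g. from erase_worker()
 */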
1376c91a719dSKyungmin Park 
1377*ff94bc40SHeiko Schocher #ifdef CONFIG_MTD_UBI_FASTMAP
1378*ff94bc40SHeiko Schocher /**
1379*ff94bc40SHeiko Schocher  * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
1380*ff94bc40SHeiko Schocher  * @ubi: UBI device description object
1381*ff94bc40SHeiko Schocher  */
1382*ff94bc40SHeiko Schocher int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
1383*ff94bc40SHeiko Schocher {
1384*ff94bc40SHeiko Schocher 	struct ubi_work *wrk;
1385*ff94bc40SHeiko Schocher 
1386*ff94bc40SHeiko Schocher 	spin_lock(&ubi->wl_lock);
1387*ff94bc40SHeiko Schocher 	if (ubi->wl_scheduled) {
1388*ff94bc40SHeiko Schocher 		spin_unlock(&ubi->wl_lock);
1389*ff94bc40SHeiko Schocher 		return 0;
1390*ff94bc40SHeiko Schocher 	}
1391*ff94bc40SHeiko Schocher 	ubi->wl_scheduled = 1;
1392*ff94bc40SHeiko Schocher 	spin_unlock(&ubi->wl_lock);
1393*ff94bc40SHeiko Schocher 
1394*ff94bc40SHeiko Schocher 	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1395*ff94bc40SHeiko Schocher 	if (!wrk) {
1396*ff94bc40SHeiko Schocher 		spin_lock(&ubi->wl_lock);
1397*ff94bc40SHeiko Schocher 		ubi->wl_scheduled = 0;
1398*ff94bc40SHeiko Schocher 		spin_unlock(&ubi->wl_lock);
1399*ff94bc40SHeiko Schocher 		return -ENOMEM;
1400*ff94bc40SHeiko Schocher 	}
1401*ff94bc40SHeiko Schocher 
1402*ff94bc40SHeiko Schocher 	wrk->anchor = 1;
1403*ff94bc40SHeiko Schocher 	wrk->func = &wear_leveling_worker;
1404*ff94bc40SHeiko Schocher 	schedule_ubi_work(ubi, wrk);
1405*ff94bc40SHeiko Schocher 	return 0;
1406*ff94bc40SHeiko Schocher }
1407*ff94bc40SHeiko Schocher #endif
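/*
 * Usage sketch (assumption: called from the fastmap code, which is not part
 * of this file): before writing a new fastmap, the caller would presumably
 * make sure an anchor PEB can be produced and propagate any error:
 *
 *	err = ubi_ensure_anchor_pebs(ubi);
 *	if (err)
 *		return err;
 */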
1408*ff94bc40SHeiko Schocher 
1409c91a719dSKyungmin Park /**
1410c91a719dSKyungmin Park  * erase_worker - physical eraseblock erase worker function.
1411c91a719dSKyungmin Park  * @ubi: UBI device description object
1412c91a719dSKyungmin Park  * @wl_wrk: the work object
1413c91a719dSKyungmin Park  * @cancel: non-zero if the worker has to free memory and exit
1414c91a719dSKyungmin Park  *
1415c91a719dSKyungmin Park  * This function erases a physical eraseblock and performs torture testing if
1416c91a719dSKyungmin Park  * needed. It also takes care of marking the physical eraseblock bad if
1417c91a719dSKyungmin Park  * needed. Returns zero in case of success and a negative error code in case of
1418c91a719dSKyungmin Park  * failure.
1419c91a719dSKyungmin Park  */
1420c91a719dSKyungmin Park static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1421c91a719dSKyungmin Park 			int cancel)
1422c91a719dSKyungmin Park {
1423c91a719dSKyungmin Park 	struct ubi_wl_entry *e = wl_wrk->e;
1424*ff94bc40SHeiko Schocher 	int pnum = e->pnum;
1425*ff94bc40SHeiko Schocher 	int vol_id = wl_wrk->vol_id;
1426*ff94bc40SHeiko Schocher 	int lnum = wl_wrk->lnum;
1427*ff94bc40SHeiko Schocher 	int err, available_consumed = 0;
1428c91a719dSKyungmin Park 
1429c91a719dSKyungmin Park 	if (cancel) {
1430c91a719dSKyungmin Park 		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1431c91a719dSKyungmin Park 		kfree(wl_wrk);
1432c91a719dSKyungmin Park 		kmem_cache_free(ubi_wl_entry_slab, e);
1433c91a719dSKyungmin Park 		return 0;
1434c91a719dSKyungmin Park 	}
1435c91a719dSKyungmin Park 
1436*ff94bc40SHeiko Schocher 	dbg_wl("erase PEB %d EC %d LEB %d:%d",
1437*ff94bc40SHeiko Schocher 	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
1438*ff94bc40SHeiko Schocher 
1439*ff94bc40SHeiko Schocher 	ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1440c91a719dSKyungmin Park 
1441c91a719dSKyungmin Park 	err = sync_erase(ubi, e, wl_wrk->torture);
1442c91a719dSKyungmin Park 	if (!err) {
1443c91a719dSKyungmin Park 		/* Fine, we've erased it successfully */
1444c91a719dSKyungmin Park 		kfree(wl_wrk);
1445c91a719dSKyungmin Park 
1446c91a719dSKyungmin Park 		spin_lock(&ubi->wl_lock);
1447c91a719dSKyungmin Park 		wl_tree_add(e, &ubi->free);
1448*ff94bc40SHeiko Schocher 		ubi->free_count++;
1449c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1450c91a719dSKyungmin Park 
1451c91a719dSKyungmin Park 		/*
1452*ff94bc40SHeiko Schocher 		 * One more erase operation has happened, take care of
1453*ff94bc40SHeiko Schocher 		 * protected physical eraseblocks.
1454c91a719dSKyungmin Park 		 */
1455*ff94bc40SHeiko Schocher 		serve_prot_queue(ubi);
1456c91a719dSKyungmin Park 
1457c91a719dSKyungmin Park 		/* And take care of wear-leveling */
1458*ff94bc40SHeiko Schocher 		err = ensure_wear_leveling(ubi, 1);
1459c91a719dSKyungmin Park 		return err;
1460c91a719dSKyungmin Park 	}
1461c91a719dSKyungmin Park 
1462c91a719dSKyungmin Park 	ubi_err("failed to erase PEB %d, error %d", pnum, err);
1463c91a719dSKyungmin Park 	kfree(wl_wrk);
1464c91a719dSKyungmin Park 
1465c91a719dSKyungmin Park 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1466c91a719dSKyungmin Park 	    err == -EBUSY) {
1467c91a719dSKyungmin Park 		int err1;
1468c91a719dSKyungmin Park 
1469c91a719dSKyungmin Park 		/* Re-schedule the PEB for erasure */
1470*ff94bc40SHeiko Schocher 		err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
1471c91a719dSKyungmin Park 		if (err1) {
1472c91a719dSKyungmin Park 			err = err1;
1473c91a719dSKyungmin Park 			goto out_ro;
1474c91a719dSKyungmin Park 		}
1475c91a719dSKyungmin Park 		return err;
1476*ff94bc40SHeiko Schocher 	}
1477*ff94bc40SHeiko Schocher 
1478*ff94bc40SHeiko Schocher 	kmem_cache_free(ubi_wl_entry_slab, e);
1479*ff94bc40SHeiko Schocher 	if (err != -EIO)
1480c91a719dSKyungmin Park 		/*
1481c91a719dSKyungmin Park 		 * If this is not %-EIO, we have no idea what to do. Scheduling
1482c91a719dSKyungmin Park 		 * this physical eraseblock for erasure again would cause
1483*ff94bc40SHeiko Schocher 		 * errors again and again. Well, let's switch to R/O mode.
1484c91a719dSKyungmin Park 		 */
1485c91a719dSKyungmin Park 		goto out_ro;
1486c91a719dSKyungmin Park 
1487c91a719dSKyungmin Park 	/* It is %-EIO, the PEB went bad */
1488c91a719dSKyungmin Park 
1489c91a719dSKyungmin Park 	if (!ubi->bad_allowed) {
1490c91a719dSKyungmin Park 		ubi_err("bad physical eraseblock %d detected", pnum);
1491c91a719dSKyungmin Park 		goto out_ro;
1492c91a719dSKyungmin Park 	}
1493c91a719dSKyungmin Park 
1494c91a719dSKyungmin Park 	spin_lock(&ubi->volumes_lock);
1495c91a719dSKyungmin Park 	if (ubi->beb_rsvd_pebs == 0) {
1496*ff94bc40SHeiko Schocher 		if (ubi->avail_pebs == 0) {
1497c91a719dSKyungmin Park 			spin_unlock(&ubi->volumes_lock);
1498*ff94bc40SHeiko Schocher 			ubi_err("no reserved/available physical eraseblocks");
1499c91a719dSKyungmin Park 			goto out_ro;
1500c91a719dSKyungmin Park 		}
1501*ff94bc40SHeiko Schocher 		ubi->avail_pebs -= 1;
1502*ff94bc40SHeiko Schocher 		available_consumed = 1;
1503*ff94bc40SHeiko Schocher 	}
1504c91a719dSKyungmin Park 	spin_unlock(&ubi->volumes_lock);
1505c91a719dSKyungmin Park 
1506*ff94bc40SHeiko Schocher 	ubi_msg("mark PEB %d as bad", pnum);
1507c91a719dSKyungmin Park 	err = ubi_io_mark_bad(ubi, pnum);
1508c91a719dSKyungmin Park 	if (err)
1509c91a719dSKyungmin Park 		goto out_ro;
1510c91a719dSKyungmin Park 
1511c91a719dSKyungmin Park 	spin_lock(&ubi->volumes_lock);
1512*ff94bc40SHeiko Schocher 	if (ubi->beb_rsvd_pebs > 0) {
1513*ff94bc40SHeiko Schocher 		if (available_consumed) {
1514*ff94bc40SHeiko Schocher 			/*
1515*ff94bc40SHeiko Schocher 			 * The amount of reserved PEBs increased since we last
1516*ff94bc40SHeiko Schocher 			 * checked.
1517*ff94bc40SHeiko Schocher 			 */
1518*ff94bc40SHeiko Schocher 			ubi->avail_pebs += 1;
1519*ff94bc40SHeiko Schocher 			available_consumed = 0;
1520*ff94bc40SHeiko Schocher 		}
1521c91a719dSKyungmin Park 		ubi->beb_rsvd_pebs -= 1;
1522*ff94bc40SHeiko Schocher 	}
1523c91a719dSKyungmin Park 	ubi->bad_peb_count += 1;
1524c91a719dSKyungmin Park 	ubi->good_peb_count -= 1;
1525c91a719dSKyungmin Park 	ubi_calculate_reserved(ubi);
1526*ff94bc40SHeiko Schocher 	if (available_consumed)
1527*ff94bc40SHeiko Schocher 		ubi_warn("no PEBs in the reserved pool, used an available PEB");
1528*ff94bc40SHeiko Schocher 	else if (ubi->beb_rsvd_pebs)
1529*ff94bc40SHeiko Schocher 		ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
1530*ff94bc40SHeiko Schocher 	else
1531*ff94bc40SHeiko Schocher 		ubi_warn("last PEB from the reserve was used");
1532c91a719dSKyungmin Park 	spin_unlock(&ubi->volumes_lock);
1533c91a719dSKyungmin Park 
1534c91a719dSKyungmin Park 	return err;
1535c91a719dSKyungmin Park 
1536c91a719dSKyungmin Park out_ro:
1537*ff94bc40SHeiko Schocher 	if (available_consumed) {
1538*ff94bc40SHeiko Schocher 		spin_lock(&ubi->volumes_lock);
1539*ff94bc40SHeiko Schocher 		ubi->avail_pebs += 1;
1540*ff94bc40SHeiko Schocher 		spin_unlock(&ubi->volumes_lock);
1541*ff94bc40SHeiko Schocher 	}
1542c91a719dSKyungmin Park 	ubi_ro_mode(ubi);
1543c91a719dSKyungmin Park 	return err;
1544c91a719dSKyungmin Park }
1545c91a719dSKyungmin Park 
1546c91a719dSKyungmin Park /**
1547*ff94bc40SHeiko Schocher  * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1548c91a719dSKyungmin Park  * @ubi: UBI device description object
1549*ff94bc40SHeiko Schocher  * @vol_id: the volume ID that last used this PEB
1550*ff94bc40SHeiko Schocher  * @lnum: the last used logical eraseblock number for the PEB
1551c91a719dSKyungmin Park  * @pnum: physical eraseblock to return
1552c91a719dSKyungmin Park  * @torture: if this physical eraseblock has to be tortured
1553c91a719dSKyungmin Park  *
1554c91a719dSKyungmin Park  * This function is called to return physical eraseblock @pnum to the pool of
1555c91a719dSKyungmin Park  * free physical eraseblocks. The @torture flag has to be set if an I/O error
1556c91a719dSKyungmin Park  * occurred to this @pnum and it has to be tested. This function returns zero
1557c91a719dSKyungmin Park  * in case of success, and a negative error code in case of failure.
1558c91a719dSKyungmin Park  */
1559*ff94bc40SHeiko Schocher int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
1560*ff94bc40SHeiko Schocher 		   int pnum, int torture)
1561c91a719dSKyungmin Park {
1562c91a719dSKyungmin Park 	int err;
1563c91a719dSKyungmin Park 	struct ubi_wl_entry *e;
1564c91a719dSKyungmin Park 
1565c91a719dSKyungmin Park 	dbg_wl("PEB %d", pnum);
1566c91a719dSKyungmin Park 	ubi_assert(pnum >= 0);
1567c91a719dSKyungmin Park 	ubi_assert(pnum < ubi->peb_count);
1568c91a719dSKyungmin Park 
1569c91a719dSKyungmin Park retry:
1570c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
1571c91a719dSKyungmin Park 	e = ubi->lookuptbl[pnum];
1572c91a719dSKyungmin Park 	if (e == ubi->move_from) {
1573c91a719dSKyungmin Park 		/*
1574c91a719dSKyungmin Park 		 * User is putting the physical eraseblock which was selected to
1575c91a719dSKyungmin Park 		 * be moved. It will be scheduled for erasure in the
1576c91a719dSKyungmin Park 		 * wear-leveling worker.
1577c91a719dSKyungmin Park 		 */
1578c91a719dSKyungmin Park 		dbg_wl("PEB %d is being moved, wait", pnum);
1579c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1580c91a719dSKyungmin Park 
1581c91a719dSKyungmin Park 		/* Wait for the WL worker by taking the @ubi->move_mutex */
1582c91a719dSKyungmin Park 		mutex_lock(&ubi->move_mutex);
1583c91a719dSKyungmin Park 		mutex_unlock(&ubi->move_mutex);
1584c91a719dSKyungmin Park 		goto retry;
1585c91a719dSKyungmin Park 	} else if (e == ubi->move_to) {
1586c91a719dSKyungmin Park 		/*
1587c91a719dSKyungmin Park 		 * User is putting the physical eraseblock which was selected
1588c91a719dSKyungmin Park 		 * as the target the data is moved to. It may happen if the EBA
1589*ff94bc40SHeiko Schocher 		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
1590*ff94bc40SHeiko Schocher 		 * but the WL sub-system has not put the PEB to the "used" tree
1591*ff94bc40SHeiko Schocher 		 * yet, but it is about to do this. So we just set a flag which
1592*ff94bc40SHeiko Schocher 		 * will tell the WL worker that the PEB is not needed anymore
1593*ff94bc40SHeiko Schocher 		 * and should be scheduled for erasure.
1594c91a719dSKyungmin Park 		 */
1595c91a719dSKyungmin Park 		dbg_wl("PEB %d is the target of data moving", pnum);
1596c91a719dSKyungmin Park 		ubi_assert(!ubi->move_to_put);
1597c91a719dSKyungmin Park 		ubi->move_to_put = 1;
1598c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1599c91a719dSKyungmin Park 		return 0;
1600c91a719dSKyungmin Park 	} else {
1601c91a719dSKyungmin Park 		if (in_wl_tree(e, &ubi->used)) {
1602*ff94bc40SHeiko Schocher 			self_check_in_wl_tree(ubi, e, &ubi->used);
1603*ff94bc40SHeiko Schocher 			rb_erase(&e->u.rb, &ubi->used);
1604c91a719dSKyungmin Park 		} else if (in_wl_tree(e, &ubi->scrub)) {
1605*ff94bc40SHeiko Schocher 			self_check_in_wl_tree(ubi, e, &ubi->scrub);
1606*ff94bc40SHeiko Schocher 			rb_erase(&e->u.rb, &ubi->scrub);
1607*ff94bc40SHeiko Schocher 		} else if (in_wl_tree(e, &ubi->erroneous)) {
1608*ff94bc40SHeiko Schocher 			self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1609*ff94bc40SHeiko Schocher 			rb_erase(&e->u.rb, &ubi->erroneous);
1610*ff94bc40SHeiko Schocher 			ubi->erroneous_peb_count -= 1;
1611*ff94bc40SHeiko Schocher 			ubi_assert(ubi->erroneous_peb_count >= 0);
1612*ff94bc40SHeiko Schocher 			/* Erroneous PEBs should be tortured */
1613*ff94bc40SHeiko Schocher 			torture = 1;
1614c91a719dSKyungmin Park 		} else {
1615*ff94bc40SHeiko Schocher 			err = prot_queue_del(ubi, e->pnum);
1616c91a719dSKyungmin Park 			if (err) {
1617c91a719dSKyungmin Park 				ubi_err("PEB %d not found", pnum);
1618c91a719dSKyungmin Park 				ubi_ro_mode(ubi);
1619c91a719dSKyungmin Park 				spin_unlock(&ubi->wl_lock);
1620c91a719dSKyungmin Park 				return err;
1621c91a719dSKyungmin Park 			}
1622c91a719dSKyungmin Park 		}
1623c91a719dSKyungmin Park 	}
1624c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1625c91a719dSKyungmin Park 
1626*ff94bc40SHeiko Schocher 	err = schedule_erase(ubi, e, vol_id, lnum, torture);
1627c91a719dSKyungmin Park 	if (err) {
1628c91a719dSKyungmin Park 		spin_lock(&ubi->wl_lock);
1629c91a719dSKyungmin Park 		wl_tree_add(e, &ubi->used);
1630c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1631c91a719dSKyungmin Park 	}
1632c91a719dSKyungmin Park 
1633c91a719dSKyungmin Park 	return err;
1634c91a719dSKyungmin Park }
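/*
 * Usage sketch (illustrative, caller names are assumptions): the EBA layer
 * is the typical user of this function, returning a PEB once its LEB has
 * been unmapped or re-mapped:
 *
 *	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
 *	// pass torture = 1 instead when an I/O error was seen on @pnum
 */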
1635c91a719dSKyungmin Park 
1636c91a719dSKyungmin Park /**
1637c91a719dSKyungmin Park  * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1638c91a719dSKyungmin Park  * @ubi: UBI device description object
1639c91a719dSKyungmin Park  * @pnum: the physical eraseblock to schedule
1640c91a719dSKyungmin Park  *
1641c91a719dSKyungmin Park  * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1642c91a719dSKyungmin Park  * needs scrubbing. This function schedules a physical eraseblock for
1643c91a719dSKyungmin Park  * scrubbing, which is done in the background. This function returns zero in
1644c91a719dSKyungmin Park  * success and a negative error code in case of failure.
1645c91a719dSKyungmin Park  */
1646c91a719dSKyungmin Park int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1647c91a719dSKyungmin Park {
1648c91a719dSKyungmin Park 	struct ubi_wl_entry *e;
1649c91a719dSKyungmin Park 
1650c91a719dSKyungmin Park 	ubi_msg("schedule PEB %d for scrubbing", pnum);
1651c91a719dSKyungmin Park 
1652c91a719dSKyungmin Park retry:
1653c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
1654c91a719dSKyungmin Park 	e = ubi->lookuptbl[pnum];
1655*ff94bc40SHeiko Schocher 	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1656*ff94bc40SHeiko Schocher 				   in_wl_tree(e, &ubi->erroneous)) {
1657c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1658c91a719dSKyungmin Park 		return 0;
1659c91a719dSKyungmin Park 	}
1660c91a719dSKyungmin Park 
1661c91a719dSKyungmin Park 	if (e == ubi->move_to) {
1662c91a719dSKyungmin Park 		/*
1663c91a719dSKyungmin Park 		 * This physical eraseblock was used to move data to. The data
1664c91a719dSKyungmin Park 		 * was moved but the PEB was not yet inserted to the proper
1665c91a719dSKyungmin Park 		 * tree. We should just wait a little and let the WL worker
1666c91a719dSKyungmin Park 		 * proceed.
1667c91a719dSKyungmin Park 		 */
1668c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1669c91a719dSKyungmin Park 		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1670c91a719dSKyungmin Park 		yield();
1671c91a719dSKyungmin Park 		goto retry;
1672c91a719dSKyungmin Park 	}
1673c91a719dSKyungmin Park 
1674c91a719dSKyungmin Park 	if (in_wl_tree(e, &ubi->used)) {
1675*ff94bc40SHeiko Schocher 		self_check_in_wl_tree(ubi, e, &ubi->used);
1676*ff94bc40SHeiko Schocher 		rb_erase(&e->u.rb, &ubi->used);
1677c91a719dSKyungmin Park 	} else {
1678c91a719dSKyungmin Park 		int err;
1679c91a719dSKyungmin Park 
1680*ff94bc40SHeiko Schocher 		err = prot_queue_del(ubi, e->pnum);
1681c91a719dSKyungmin Park 		if (err) {
1682c91a719dSKyungmin Park 			ubi_err("PEB %d not found", pnum);
1683c91a719dSKyungmin Park 			ubi_ro_mode(ubi);
1684c91a719dSKyungmin Park 			spin_unlock(&ubi->wl_lock);
1685c91a719dSKyungmin Park 			return err;
1686c91a719dSKyungmin Park 		}
1687c91a719dSKyungmin Park 	}
1688c91a719dSKyungmin Park 
1689c91a719dSKyungmin Park 	wl_tree_add(e, &ubi->scrub);
1690c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1691c91a719dSKyungmin Park 
1692c91a719dSKyungmin Park 	/*
1693c91a719dSKyungmin Park 	 * Technically scrubbing is the same as wear-leveling, so it is done
1694c91a719dSKyungmin Park 	 * by the WL worker.
1695c91a719dSKyungmin Park 	 */
1696*ff94bc40SHeiko Schocher 	return ensure_wear_leveling(ubi, 0);
1697c91a719dSKyungmin Park }
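/*
 * Usage sketch (illustrative): a presumed read path that detects correctable
 * bit-flips would hand the PEB over for scrubbing like this:
 *
 *	if (read_err == UBI_IO_BITFLIPS)
 *		err = ubi_wl_scrub_peb(ubi, pnum);
 */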
1698c91a719dSKyungmin Park 
1699c91a719dSKyungmin Park /**
1700c91a719dSKyungmin Park  * ubi_wl_flush - flush all pending works.
1701c91a719dSKyungmin Park  * @ubi: UBI device description object
1702*ff94bc40SHeiko Schocher  * @vol_id: the volume id to flush for
1703*ff94bc40SHeiko Schocher  * @lnum: the logical eraseblock number to flush for
1704c91a719dSKyungmin Park  *
1705*ff94bc40SHeiko Schocher  * This function executes all pending works for a particular volume id /
1706*ff94bc40SHeiko Schocher  * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
1707*ff94bc40SHeiko Schocher  * acts as a wildcard for all of the corresponding volume numbers or logical
1708*ff94bc40SHeiko Schocher  * eraseblock numbers. It returns zero in case of success and a negative error
1709*ff94bc40SHeiko Schocher  * code in case of failure.
1710c91a719dSKyungmin Park  */
1711*ff94bc40SHeiko Schocher int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1712c91a719dSKyungmin Park {
1713*ff94bc40SHeiko Schocher 	int err = 0;
1714*ff94bc40SHeiko Schocher 	int found = 1;
1715c91a719dSKyungmin Park 
1716c91a719dSKyungmin Park 	/*
1717*ff94bc40SHeiko Schocher 	 * Erase while the pending works queue is not empty, but not more than
1718c91a719dSKyungmin Park 	 * the number of currently pending works.
1719c91a719dSKyungmin Park 	 */
1720*ff94bc40SHeiko Schocher 	dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1721*ff94bc40SHeiko Schocher 	       vol_id, lnum, ubi->works_count);
1722*ff94bc40SHeiko Schocher 
1723*ff94bc40SHeiko Schocher 	while (found) {
1724*ff94bc40SHeiko Schocher 		struct ubi_work *wrk;
1725*ff94bc40SHeiko Schocher 		found = 0;
1726*ff94bc40SHeiko Schocher 
1727*ff94bc40SHeiko Schocher 		down_read(&ubi->work_sem);
1728*ff94bc40SHeiko Schocher 		spin_lock(&ubi->wl_lock);
1729*ff94bc40SHeiko Schocher 		list_for_each_entry(wrk, &ubi->works, list) {
1730*ff94bc40SHeiko Schocher 			if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1731*ff94bc40SHeiko Schocher 			    (lnum == UBI_ALL || wrk->lnum == lnum)) {
1732*ff94bc40SHeiko Schocher 				list_del(&wrk->list);
1733*ff94bc40SHeiko Schocher 				ubi->works_count -= 1;
1734*ff94bc40SHeiko Schocher 				ubi_assert(ubi->works_count >= 0);
1735*ff94bc40SHeiko Schocher 				spin_unlock(&ubi->wl_lock);
1736*ff94bc40SHeiko Schocher 
1737*ff94bc40SHeiko Schocher 				err = wrk->func(ubi, wrk, 0);
1738*ff94bc40SHeiko Schocher 				if (err) {
1739*ff94bc40SHeiko Schocher 					up_read(&ubi->work_sem);
1740c91a719dSKyungmin Park 					return err;
1741c91a719dSKyungmin Park 				}
1742c91a719dSKyungmin Park 
1743*ff94bc40SHeiko Schocher 				spin_lock(&ubi->wl_lock);
1744*ff94bc40SHeiko Schocher 				found = 1;
1745*ff94bc40SHeiko Schocher 				break;
1746*ff94bc40SHeiko Schocher 			}
1747*ff94bc40SHeiko Schocher 		}
1748*ff94bc40SHeiko Schocher 		spin_unlock(&ubi->wl_lock);
1749*ff94bc40SHeiko Schocher 		up_read(&ubi->work_sem);
1750*ff94bc40SHeiko Schocher 	}
1751*ff94bc40SHeiko Schocher 
1752c91a719dSKyungmin Park 	/*
1753c91a719dSKyungmin Park 	 * Make sure all the works which have been done in parallel are
1754c91a719dSKyungmin Park 	 * finished.
1755c91a719dSKyungmin Park 	 */
1756c91a719dSKyungmin Park 	down_write(&ubi->work_sem);
1757c91a719dSKyungmin Park 	up_write(&ubi->work_sem);
1758c91a719dSKyungmin Park 
1759c91a719dSKyungmin Park 	return err;
1760c91a719dSKyungmin Park }
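/*
 * Usage sketch (illustrative): %UBI_ALL acts as a wildcard, so flushing all
 * pending works for the whole device is simply
 *
 *	err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
 *
 * whereas a caller that only needs the works for one particular LEB to be
 * completed passes the concrete pair:
 *
 *	err = ubi_wl_flush(ubi, vol_id, lnum);
 */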
1761c91a719dSKyungmin Park 
1762c91a719dSKyungmin Park /**
1763c91a719dSKyungmin Park  * tree_destroy - destroy an RB-tree.
1764c91a719dSKyungmin Park  * @root: the root of the tree to destroy
1765c91a719dSKyungmin Park  */
1766c91a719dSKyungmin Park static void tree_destroy(struct rb_root *root)
1767c91a719dSKyungmin Park {
1768c91a719dSKyungmin Park 	struct rb_node *rb;
1769c91a719dSKyungmin Park 	struct ubi_wl_entry *e;
1770c91a719dSKyungmin Park 
1771c91a719dSKyungmin Park 	rb = root->rb_node;
1772c91a719dSKyungmin Park 	while (rb) {
1773c91a719dSKyungmin Park 		if (rb->rb_left)
1774c91a719dSKyungmin Park 			rb = rb->rb_left;
1775c91a719dSKyungmin Park 		else if (rb->rb_right)
1776c91a719dSKyungmin Park 			rb = rb->rb_right;
1777c91a719dSKyungmin Park 		else {
1778*ff94bc40SHeiko Schocher 			e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1779c91a719dSKyungmin Park 
1780c91a719dSKyungmin Park 			rb = rb_parent(rb);
1781c91a719dSKyungmin Park 			if (rb) {
1782*ff94bc40SHeiko Schocher 				if (rb->rb_left == &e->u.rb)
1783c91a719dSKyungmin Park 					rb->rb_left = NULL;
1784c91a719dSKyungmin Park 				else
1785c91a719dSKyungmin Park 					rb->rb_right = NULL;
1786c91a719dSKyungmin Park 			}
1787c91a719dSKyungmin Park 
1788c91a719dSKyungmin Park 			kmem_cache_free(ubi_wl_entry_slab, e);
1789c91a719dSKyungmin Park 		}
1790c91a719dSKyungmin Park 	}
1791c91a719dSKyungmin Park }
1792c91a719dSKyungmin Park 
1793c91a719dSKyungmin Park /**
1794c91a719dSKyungmin Park  * ubi_thread - UBI background thread.
1795c91a719dSKyungmin Park  * @u: the UBI device description object pointer
1796c91a719dSKyungmin Park  */
1797c91a719dSKyungmin Park int ubi_thread(void *u)
1798c91a719dSKyungmin Park {
1799c91a719dSKyungmin Park 	int failures = 0;
1800c91a719dSKyungmin Park 	struct ubi_device *ubi = u;
1801c91a719dSKyungmin Park 
1802c91a719dSKyungmin Park 	ubi_msg("background thread \"%s\" started, PID %d",
1803c91a719dSKyungmin Park 		ubi->bgt_name, task_pid_nr(current));
1804c91a719dSKyungmin Park 
1805c91a719dSKyungmin Park 	set_freezable();
1806c91a719dSKyungmin Park 	for (;;) {
1807c91a719dSKyungmin Park 		int err;
1808c91a719dSKyungmin Park 
1809c91a719dSKyungmin Park 		if (kthread_should_stop())
1810c91a719dSKyungmin Park 			break;
1811c91a719dSKyungmin Park 
1812c91a719dSKyungmin Park 		if (try_to_freeze())
1813c91a719dSKyungmin Park 			continue;
1814c91a719dSKyungmin Park 
1815c91a719dSKyungmin Park 		spin_lock(&ubi->wl_lock);
1816c91a719dSKyungmin Park 		if (list_empty(&ubi->works) || ubi->ro_mode ||
1817*ff94bc40SHeiko Schocher 		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1818c91a719dSKyungmin Park 			set_current_state(TASK_INTERRUPTIBLE);
1819c91a719dSKyungmin Park 			spin_unlock(&ubi->wl_lock);
1820c91a719dSKyungmin Park 			schedule();
1821c91a719dSKyungmin Park 			continue;
1822c91a719dSKyungmin Park 		}
1823c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1824c91a719dSKyungmin Park 
1825c91a719dSKyungmin Park 		err = do_work(ubi);
1826c91a719dSKyungmin Park 		if (err) {
1827c91a719dSKyungmin Park 			ubi_err("%s: work failed with error code %d",
1828c91a719dSKyungmin Park 				ubi->bgt_name, err);
1829c91a719dSKyungmin Park 			if (failures++ > WL_MAX_FAILURES) {
1830c91a719dSKyungmin Park 				/*
1831c91a719dSKyungmin Park 				 * Too many failures, disable the thread and
1832c91a719dSKyungmin Park 				 * switch to read-only mode.
1833c91a719dSKyungmin Park 				 */
1834c91a719dSKyungmin Park 				ubi_msg("%s: %d consecutive failures",
1835c91a719dSKyungmin Park 					ubi->bgt_name, WL_MAX_FAILURES);
1836c91a719dSKyungmin Park 				ubi_ro_mode(ubi);
1837*ff94bc40SHeiko Schocher 				ubi->thread_enabled = 0;
1838*ff94bc40SHeiko Schocher 				continue;
1839c91a719dSKyungmin Park 			}
1840c91a719dSKyungmin Park 		} else
1841c91a719dSKyungmin Park 			failures = 0;
1842c91a719dSKyungmin Park 
1843c91a719dSKyungmin Park 		cond_resched();
1844c91a719dSKyungmin Park 	}
1845c91a719dSKyungmin Park 
1846c91a719dSKyungmin Park 	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1847c91a719dSKyungmin Park 	return 0;
1848c91a719dSKyungmin Park }
1849c91a719dSKyungmin Park 
1850c91a719dSKyungmin Park /**
1851c91a719dSKyungmin Park  * cancel_pending - cancel all pending works.
1852c91a719dSKyungmin Park  * @ubi: UBI device description object
1853c91a719dSKyungmin Park  */
1854c91a719dSKyungmin Park static void cancel_pending(struct ubi_device *ubi)
1855c91a719dSKyungmin Park {
1856c91a719dSKyungmin Park 	while (!list_empty(&ubi->works)) {
1857c91a719dSKyungmin Park 		struct ubi_work *wrk;
1858c91a719dSKyungmin Park 
1859c91a719dSKyungmin Park 		wrk = list_entry(ubi->works.next, struct ubi_work, list);
1860c91a719dSKyungmin Park 		list_del(&wrk->list);
1861c91a719dSKyungmin Park 		wrk->func(ubi, wrk, 1);
1862c91a719dSKyungmin Park 		ubi->works_count -= 1;
1863c91a719dSKyungmin Park 		ubi_assert(ubi->works_count >= 0);
1864c91a719dSKyungmin Park 	}
1865c91a719dSKyungmin Park }
1866c91a719dSKyungmin Park 
1867c91a719dSKyungmin Park /**
1868*ff94bc40SHeiko Schocher  * ubi_wl_init - initialize the WL sub-system using attaching information.
1869c91a719dSKyungmin Park  * @ubi: UBI device description object
1870*ff94bc40SHeiko Schocher  * @ai: attaching information
1871c91a719dSKyungmin Park  *
1872c91a719dSKyungmin Park  * This function returns zero in case of success, and a negative error code in
1873c91a719dSKyungmin Park  * case of failure.
1874c91a719dSKyungmin Park  */
1875*ff94bc40SHeiko Schocher int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1876c91a719dSKyungmin Park {
1877*ff94bc40SHeiko Schocher 	int err, i, reserved_pebs, found_pebs = 0;
1878c91a719dSKyungmin Park 	struct rb_node *rb1, *rb2;
1879*ff94bc40SHeiko Schocher 	struct ubi_ainf_volume *av;
1880*ff94bc40SHeiko Schocher 	struct ubi_ainf_peb *aeb, *tmp;
1881c91a719dSKyungmin Park 	struct ubi_wl_entry *e;
1882c91a719dSKyungmin Park 
1883*ff94bc40SHeiko Schocher 	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1884c91a719dSKyungmin Park 	spin_lock_init(&ubi->wl_lock);
1885c91a719dSKyungmin Park 	mutex_init(&ubi->move_mutex);
1886c91a719dSKyungmin Park 	init_rwsem(&ubi->work_sem);
1887*ff94bc40SHeiko Schocher 	ubi->max_ec = ai->max_ec;
1888c91a719dSKyungmin Park 	INIT_LIST_HEAD(&ubi->works);
1889*ff94bc40SHeiko Schocher #ifndef __UBOOT__
1890*ff94bc40SHeiko Schocher #ifdef CONFIG_MTD_UBI_FASTMAP
1891*ff94bc40SHeiko Schocher 	INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
1892*ff94bc40SHeiko Schocher #endif
1893*ff94bc40SHeiko Schocher #endif
1894c91a719dSKyungmin Park 
1895c91a719dSKyungmin Park 	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1896c91a719dSKyungmin Park 
1897c91a719dSKyungmin Park 	err = -ENOMEM;
1898c91a719dSKyungmin Park 	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1899c91a719dSKyungmin Park 	if (!ubi->lookuptbl)
1900c91a719dSKyungmin Park 		return err;
1901c91a719dSKyungmin Park 
1902*ff94bc40SHeiko Schocher 	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1903*ff94bc40SHeiko Schocher 		INIT_LIST_HEAD(&ubi->pq[i]);
1904*ff94bc40SHeiko Schocher 	ubi->pq_head = 0;
1905*ff94bc40SHeiko Schocher 
1906*ff94bc40SHeiko Schocher 	list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1907c91a719dSKyungmin Park 		cond_resched();
1908c91a719dSKyungmin Park 
1909c91a719dSKyungmin Park 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1910c91a719dSKyungmin Park 		if (!e)
1911c91a719dSKyungmin Park 			goto out_free;
1912c91a719dSKyungmin Park 
1913*ff94bc40SHeiko Schocher 		e->pnum = aeb->pnum;
1914*ff94bc40SHeiko Schocher 		e->ec = aeb->ec;
1915*ff94bc40SHeiko Schocher 		ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1916c91a719dSKyungmin Park 		ubi->lookuptbl[e->pnum] = e;
1917*ff94bc40SHeiko Schocher 		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
1918c91a719dSKyungmin Park 			kmem_cache_free(ubi_wl_entry_slab, e);
1919c91a719dSKyungmin Park 			goto out_free;
1920c91a719dSKyungmin Park 		}
1921*ff94bc40SHeiko Schocher 
1922*ff94bc40SHeiko Schocher 		found_pebs++;
1923c91a719dSKyungmin Park 	}
1924c91a719dSKyungmin Park 
1925*ff94bc40SHeiko Schocher 	ubi->free_count = 0;
1926*ff94bc40SHeiko Schocher 	list_for_each_entry(aeb, &ai->free, u.list) {
1927c91a719dSKyungmin Park 		cond_resched();
1928c91a719dSKyungmin Park 
1929c91a719dSKyungmin Park 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1930c91a719dSKyungmin Park 		if (!e)
1931c91a719dSKyungmin Park 			goto out_free;
1932c91a719dSKyungmin Park 
1933*ff94bc40SHeiko Schocher 		e->pnum = aeb->pnum;
1934*ff94bc40SHeiko Schocher 		e->ec = aeb->ec;
1935c91a719dSKyungmin Park 		ubi_assert(e->ec >= 0);
1936*ff94bc40SHeiko Schocher 		ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1937*ff94bc40SHeiko Schocher 
1938c91a719dSKyungmin Park 		wl_tree_add(e, &ubi->free);
1939*ff94bc40SHeiko Schocher 		ubi->free_count++;
1940*ff94bc40SHeiko Schocher 
1941c91a719dSKyungmin Park 		ubi->lookuptbl[e->pnum] = e;
1942*ff94bc40SHeiko Schocher 
1943*ff94bc40SHeiko Schocher 		found_pebs++;
1944c91a719dSKyungmin Park 	}
1945c91a719dSKyungmin Park 
1946*ff94bc40SHeiko Schocher 	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1947*ff94bc40SHeiko Schocher 		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1948c91a719dSKyungmin Park 			cond_resched();
1949c91a719dSKyungmin Park 
1950c91a719dSKyungmin Park 			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1951c91a719dSKyungmin Park 			if (!e)
1952c91a719dSKyungmin Park 				goto out_free;
1953c91a719dSKyungmin Park 
1954*ff94bc40SHeiko Schocher 			e->pnum = aeb->pnum;
1955*ff94bc40SHeiko Schocher 			e->ec = aeb->ec;
1956c91a719dSKyungmin Park 			ubi->lookuptbl[e->pnum] = e;
1957c91a719dSKyungmin Park 
1958*ff94bc40SHeiko Schocher 			if (!aeb->scrub) {
1959c91a719dSKyungmin Park 				dbg_wl("add PEB %d EC %d to the used tree",
1960c91a719dSKyungmin Park 				       e->pnum, e->ec);
1961c91a719dSKyungmin Park 				wl_tree_add(e, &ubi->used);
1962c91a719dSKyungmin Park 			} else {
1963c91a719dSKyungmin Park 				dbg_wl("add PEB %d EC %d to the scrub tree",
1964c91a719dSKyungmin Park 				       e->pnum, e->ec);
1965c91a719dSKyungmin Park 				wl_tree_add(e, &ubi->scrub);
1966c91a719dSKyungmin Park 			}
1967*ff94bc40SHeiko Schocher 
1968*ff94bc40SHeiko Schocher 			found_pebs++;
1969c91a719dSKyungmin Park 		}
1970c91a719dSKyungmin Park 	}
1971c91a719dSKyungmin Park 
1972*ff94bc40SHeiko Schocher 	dbg_wl("found %i PEBs", found_pebs);
1973*ff94bc40SHeiko Schocher 
1974*ff94bc40SHeiko Schocher 	if (ubi->fm)
1975*ff94bc40SHeiko Schocher 		ubi_assert(ubi->good_peb_count == \
1976*ff94bc40SHeiko Schocher 			   found_pebs + ubi->fm->used_blocks);
1977*ff94bc40SHeiko Schocher 	else
1978*ff94bc40SHeiko Schocher 		ubi_assert(ubi->good_peb_count == found_pebs);
1979*ff94bc40SHeiko Schocher 
1980*ff94bc40SHeiko Schocher 	reserved_pebs = WL_RESERVED_PEBS;
1981*ff94bc40SHeiko Schocher #ifdef CONFIG_MTD_UBI_FASTMAP
1982*ff94bc40SHeiko Schocher 	/* Reserve enough LEBs to store two fastmaps. */
1983*ff94bc40SHeiko Schocher 	reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
1984*ff94bc40SHeiko Schocher #endif
1985*ff94bc40SHeiko Schocher 
1986*ff94bc40SHeiko Schocher 	if (ubi->avail_pebs < reserved_pebs) {
1987c91a719dSKyungmin Park 		ubi_err("not enough physical eraseblocks (%d, need %d)",
1988*ff94bc40SHeiko Schocher 			ubi->avail_pebs, reserved_pebs);
1989*ff94bc40SHeiko Schocher 		if (ubi->corr_peb_count)
1990*ff94bc40SHeiko Schocher 			ubi_err("%d PEBs are corrupted and not used",
1991*ff94bc40SHeiko Schocher 				ubi->corr_peb_count);
1992c91a719dSKyungmin Park 		goto out_free;
1993c91a719dSKyungmin Park 	}
1994*ff94bc40SHeiko Schocher 	ubi->avail_pebs -= reserved_pebs;
1995*ff94bc40SHeiko Schocher 	ubi->rsvd_pebs += reserved_pebs;
1996c91a719dSKyungmin Park 
1997c91a719dSKyungmin Park 	/* Schedule wear-leveling if needed */
1998*ff94bc40SHeiko Schocher 	err = ensure_wear_leveling(ubi, 0);
1999c91a719dSKyungmin Park 	if (err)
2000c91a719dSKyungmin Park 		goto out_free;
2001c91a719dSKyungmin Park 
2002c91a719dSKyungmin Park 	return 0;
2003c91a719dSKyungmin Park 
2004c91a719dSKyungmin Park out_free:
2005c91a719dSKyungmin Park 	cancel_pending(ubi);
2006c91a719dSKyungmin Park 	tree_destroy(&ubi->used);
2007c91a719dSKyungmin Park 	tree_destroy(&ubi->free);
2008c91a719dSKyungmin Park 	tree_destroy(&ubi->scrub);
2009c91a719dSKyungmin Park 	kfree(ubi->lookuptbl);
2010c91a719dSKyungmin Park 	return err;
2011c91a719dSKyungmin Park }
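/*
 * Reservation sketch (illustrative numbers only): with fastmap enabled the
 * reserve above grows by room for two fastmaps. If, say, ubi->fm_size were
 * 256 KiB and ubi->leb_size 128 KiB, then
 *
 *	reserved_pebs = WL_RESERVED_PEBS + (262144 / 131072) * 2
 *	              = WL_RESERVED_PEBS + 4
 *
 * and attaching fails with the "not enough physical eraseblocks" error if
 * @ubi->avail_pebs cannot cover that amount.
 */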
2012c91a719dSKyungmin Park 
2013c91a719dSKyungmin Park /**
2014*ff94bc40SHeiko Schocher  * protection_queue_destroy - destroy the protection queue.
2015c91a719dSKyungmin Park  * @ubi: UBI device description object
2016c91a719dSKyungmin Park  */
2017*ff94bc40SHeiko Schocher static void protection_queue_destroy(struct ubi_device *ubi)
2018c91a719dSKyungmin Park {
2019*ff94bc40SHeiko Schocher 	int i;
2020*ff94bc40SHeiko Schocher 	struct ubi_wl_entry *e, *tmp;
2021c91a719dSKyungmin Park 
2022*ff94bc40SHeiko Schocher 	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
2023*ff94bc40SHeiko Schocher 		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
2024*ff94bc40SHeiko Schocher 			list_del(&e->u.list);
2025*ff94bc40SHeiko Schocher 			kmem_cache_free(ubi_wl_entry_slab, e);
2026c91a719dSKyungmin Park 		}
2027c91a719dSKyungmin Park 	}
2028c91a719dSKyungmin Park }
2029c91a719dSKyungmin Park 
2030c91a719dSKyungmin Park /**
2031*ff94bc40SHeiko Schocher  * ubi_wl_close - close the wear-leveling sub-system.
2032c91a719dSKyungmin Park  * @ubi: UBI device description object
2033c91a719dSKyungmin Park  */
2034c91a719dSKyungmin Park void ubi_wl_close(struct ubi_device *ubi)
2035c91a719dSKyungmin Park {
2036*ff94bc40SHeiko Schocher 	dbg_wl("close the WL sub-system");
2037c91a719dSKyungmin Park 	cancel_pending(ubi);
2038*ff94bc40SHeiko Schocher 	protection_queue_destroy(ubi);
2039c91a719dSKyungmin Park 	tree_destroy(&ubi->used);
2040*ff94bc40SHeiko Schocher 	tree_destroy(&ubi->erroneous);
2041c91a719dSKyungmin Park 	tree_destroy(&ubi->free);
2042c91a719dSKyungmin Park 	tree_destroy(&ubi->scrub);
2043c91a719dSKyungmin Park 	kfree(ubi->lookuptbl);
2044c91a719dSKyungmin Park }
2045c91a719dSKyungmin Park 
2046c91a719dSKyungmin Park /**
2047*ff94bc40SHeiko Schocher  * self_check_ec - make sure that the erase counter of a PEB is correct.
2048c91a719dSKyungmin Park  * @ubi: UBI device description object
2049c91a719dSKyungmin Park  * @pnum: the physical eraseblock number to check
2050c91a719dSKyungmin Park  * @ec: the erase counter to check
2051c91a719dSKyungmin Park  *
2052c91a719dSKyungmin Park  * This function returns zero if the erase counter of physical eraseblock @pnum
2053*ff94bc40SHeiko Schocher  * is equivalent to @ec, and a negative error code if not or if an error
2054c91a719dSKyungmin Park  * occurred.
2055c91a719dSKyungmin Park  */
2056*ff94bc40SHeiko Schocher static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
2057c91a719dSKyungmin Park {
2058c91a719dSKyungmin Park 	int err;
2059c91a719dSKyungmin Park 	long long read_ec;
2060c91a719dSKyungmin Park 	struct ubi_ec_hdr *ec_hdr;
2061c91a719dSKyungmin Park 
2062*ff94bc40SHeiko Schocher 	if (!ubi_dbg_chk_gen(ubi))
2063*ff94bc40SHeiko Schocher 		return 0;
2064*ff94bc40SHeiko Schocher 
2065c91a719dSKyungmin Park 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
2066c91a719dSKyungmin Park 	if (!ec_hdr)
2067c91a719dSKyungmin Park 		return -ENOMEM;
2068c91a719dSKyungmin Park 
2069c91a719dSKyungmin Park 	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
2070c91a719dSKyungmin Park 	if (err && err != UBI_IO_BITFLIPS) {
2071c91a719dSKyungmin Park 		/* The header does not have to exist */
2072c91a719dSKyungmin Park 		err = 0;
2073c91a719dSKyungmin Park 		goto out_free;
2074c91a719dSKyungmin Park 	}
2075c91a719dSKyungmin Park 
2076c91a719dSKyungmin Park 	read_ec = be64_to_cpu(ec_hdr->ec);
2077*ff94bc40SHeiko Schocher 	if (ec != read_ec && read_ec - ec > 1) {
2078*ff94bc40SHeiko Schocher 		ubi_err("self-check failed for PEB %d", pnum);
2079c91a719dSKyungmin Park 		ubi_err("read EC is %lld, should be %d", read_ec, ec);
2080*ff94bc40SHeiko Schocher 		dump_stack();
2081c91a719dSKyungmin Park 		err = 1;
2082c91a719dSKyungmin Park 	} else
2083c91a719dSKyungmin Park 		err = 0;
2084c91a719dSKyungmin Park 
2085c91a719dSKyungmin Park out_free:
2086c91a719dSKyungmin Park 	kfree(ec_hdr);
2087c91a719dSKyungmin Park 	return err;
2088c91a719dSKyungmin Park }
2089c91a719dSKyungmin Park 
2090c91a719dSKyungmin Park /**
2091*ff94bc40SHeiko Schocher  * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
2092*ff94bc40SHeiko Schocher  * @ubi: UBI device description object
2093c91a719dSKyungmin Park  * @e: the wear-leveling entry to check
2094c91a719dSKyungmin Park  * @root: the root of the tree
2095c91a719dSKyungmin Park  *
2096*ff94bc40SHeiko Schocher  * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
2097c91a719dSKyungmin Park  * is not.
2098c91a719dSKyungmin Park  */
2099*ff94bc40SHeiko Schocher static int self_check_in_wl_tree(const struct ubi_device *ubi,
2100*ff94bc40SHeiko Schocher 				 struct ubi_wl_entry *e, struct rb_root *root)
2101c91a719dSKyungmin Park {
2102*ff94bc40SHeiko Schocher 	if (!ubi_dbg_chk_gen(ubi))
2103*ff94bc40SHeiko Schocher 		return 0;
2104*ff94bc40SHeiko Schocher 
2105c91a719dSKyungmin Park 	if (in_wl_tree(e, root))
2106c91a719dSKyungmin Park 		return 0;
2107c91a719dSKyungmin Park 
2108*ff94bc40SHeiko Schocher 	ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ",
2109c91a719dSKyungmin Park 		e->pnum, e->ec, root);
2110*ff94bc40SHeiko Schocher 	dump_stack();
2111*ff94bc40SHeiko Schocher 	return -EINVAL;
2112c91a719dSKyungmin Park }
2113c91a719dSKyungmin Park 
2114*ff94bc40SHeiko Schocher /**
2115*ff94bc40SHeiko Schocher  * self_check_in_pq - check if wear-leveling entry is in the protection
2116*ff94bc40SHeiko Schocher  *                        queue.
2117*ff94bc40SHeiko Schocher  * @ubi: UBI device description object
2118*ff94bc40SHeiko Schocher  * @e: the wear-leveling entry to check
2119*ff94bc40SHeiko Schocher  *
2120*ff94bc40SHeiko Schocher  * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
2121*ff94bc40SHeiko Schocher  */
2122*ff94bc40SHeiko Schocher static int self_check_in_pq(const struct ubi_device *ubi,
2123*ff94bc40SHeiko Schocher 			    struct ubi_wl_entry *e)
2124*ff94bc40SHeiko Schocher {
2125*ff94bc40SHeiko Schocher 	struct ubi_wl_entry *p;
2126*ff94bc40SHeiko Schocher 	int i;
2127*ff94bc40SHeiko Schocher 
2128*ff94bc40SHeiko Schocher 	if (!ubi_dbg_chk_gen(ubi))
2129*ff94bc40SHeiko Schocher 		return 0;
2130*ff94bc40SHeiko Schocher 
2131*ff94bc40SHeiko Schocher 	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
2132*ff94bc40SHeiko Schocher 		list_for_each_entry(p, &ubi->pq[i], u.list)
2133*ff94bc40SHeiko Schocher 			if (p == e)
2134*ff94bc40SHeiko Schocher 				return 0;
2135*ff94bc40SHeiko Schocher 
2136*ff94bc40SHeiko Schocher 	ubi_err("self-check failed for PEB %d, EC %d, Protect queue",
2137*ff94bc40SHeiko Schocher 		e->pnum, e->ec);
2138*ff94bc40SHeiko Schocher 	dump_stack();
2139*ff94bc40SHeiko Schocher 	return -EINVAL;
2140*ff94bc40SHeiko Schocher }
2141