xref: /openbmc/u-boot/drivers/mtd/ubi/wl.c (revision e8f80a5a)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As it was said, for the UBI sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
 * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
 * RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 *  o we don't want to move physical eraseblocks just after we have given them
 *    to the user; instead, we first want to let users fill them up with data;
 *
 *  o there is a chance that the user will put the physical eraseblock very
 *    soon, so it makes sense not to move it for some time, but wait.
 *
 * Physical eraseblocks stay protected only for limited time. But the "time" is
 * measured in erase cycles in this case. This is implemented with help of the
 * protection queue. Eraseblocks are put to the tail of this queue when they
 * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
 * head of the queue on each erase operation (for any eraseblock). So the
 * length of the queue defines how many (global) erase cycles PEBs are
 * protected.
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split up into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with low erase counter, and we need to pick the
 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This is
 * room for future re-works of the WL sub-system.
 */
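
/*
 * Illustrative sketch (not part of this driver): how a higher layer might
 * drive the get/put cycle described above. The exact signatures of
 * ubi_wl_get_peb() and ubi_wl_put_peb() are assumptions to be checked
 * against ubi.h, and the vol_id/lnum values are placeholders, hence the
 * #if 0 guard.
 */
#if 0
static int example_use_one_peb(struct ubi_device *ubi)
{
	int pnum, err;

	/* Grab a free PEB; it moves into the protection queue (@wl->pq). */
	pnum = ubi_wl_get_peb(ubi);
	if (pnum < 0)
		return pnum;

	/* ... write data beyond the EC header here ... */

	/*
	 * Return the PEB; the WL sub-system schedules an asynchronous
	 * erasure for it.
	 */
	err = ubi_wl_put_peb(ubi, -1, -1, pnum, 0);
	return err;
}
#endif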

#ifndef __UBOOT__
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#else
#include <ubi_uboot.h>
#endif

#include "ubi.h"
#include "wl.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
 * does not pick eraseblocks with erase counter greater than the lowest erase
 * counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
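
/*
 * Worked example (hypothetical numbers): with UBI_WL_THRESHOLD configured
 * as 4096, WL_FREE_MAX_DIFF is 8192, so if the least worn free PEB has
 * EC 100, the mover never picks a free PEB whose erase counter exceeds
 * 100 + 8192 as a target.
 */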

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e);

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}
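
/*
 * Illustrative sketch (not in the driver): the ordering used by
 * wl_tree_add() and in_wl_tree() expressed as a standalone comparator.
 * Entries sort by erase counter first, with the PEB number breaking ties,
 * which is why equal-EC entries can coexist in one tree.
 */
#if 0
static int example_wl_entry_cmp(const struct ubi_wl_entry *a,
				const struct ubi_wl_entry *b)
{
	if (a->ec != b->ec)
		return a->ec < b->ec ? -1 : 1;
	if (a->pnum != b->pnum)
		return a->pnum < b->pnum ? -1 : 1;
	return 0;
}
#endif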

/**
 * wl_entry_destroy - destroy a wear-leveling entry.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
 *
 * This function destroys a wear leveling entry and removes
 * the reference from the lookup table.
 */
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	ubi->lookuptbl[e->pnum] = NULL;
	kmem_cache_free(ubi_wl_entry_slab, e);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing works at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes the mutex in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err(ubi, "work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}
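
/*
 * Illustrative sketch (not in the driver): the flush side of the
 * @ubi->work_sem protocol described above. The real flush entry point in
 * UBI is ubi_wl_flush(); this distilled version (locking around the
 * list_empty() check elided for brevity) just shows why taking the
 * semaphore in write mode guarantees all in-flight workers have finished.
 */
#if 0
static void example_flush_all_works(struct ubi_device *ubi)
{
	/* Run pending works until the queue is observed empty. */
	while (!list_empty(&ubi->works))
		do_work(ubi);

	/* Workers hold work_sem in read mode; this waits them all out. */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);
}
#endif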

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
 * be locked.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
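
/*
 * Worked example (assuming UBI_PROT_QUEUE_LEN is 10, as in mainline): if
 * @ubi->pq_head is 0, the tail computed above wraps to slot 9; if pq_head
 * is 4, the tail is slot 3. serve_prot_queue() below advances pq_head by
 * one per erase operation, so an entry added here survives a full lap of
 * the queue - 10 global erase cycles - before it is released to @ubi->used.
 */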

/**
 * find_wl_entry - find wear-leveling entry closest to a certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree to search
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.
 */
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
					  struct rb_root *root, int diff)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *prev_e = NULL;
	int max;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max = e->ec + diff;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			prev_e = e;
			e = e1;
		}
	}

	/* If no fastmap has been written and this WL entry can be used
	 * as anchor PEB, hold it back and return the second best WL entry
	 * such that fastmap can use the anchor PEB later. */
	if (prev_e && !ubi->fm_disabled &&
	    !ubi->fm && e->pnum < UBI_FM_MAX_START)
		return prev_e;

	return e;
}
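
/*
 * Worked example (hypothetical erase counters): for a tree holding entries
 * with ECs {100, 150, 400} and @diff of 200, max is 100 + 200 = 300. The
 * walk keeps descending right while ECs stay below 300, so the EC-150
 * entry is returned; the EC-400 entry is rejected as too worn to pick.
 */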

/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree to search
 *
 * This function looks for a wear leveling entry with medium erase counter,
 * but not greater than or equal to the lowest erase counter plus
 * %WL_FREE_MAX_DIFF/2.
 */
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
					       struct rb_root *root)
{
	struct ubi_wl_entry *e, *first, *last;

	first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

	if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

		/* If no fastmap has been written and this WL entry can be used
		 * as anchor PEB, hold it back and return the second best
		 * WL entry such that fastmap can use the anchor PEB later. */
		e = may_reserve_for_fm(ubi, e, root);
	} else
		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

	return e;
}

/**
 * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
 * refill_wl_user_pool().
 * @ubi: UBI device description object
 *
 * This function returns a wear leveling entry in case of success and
 * NULL in case of failure.
 */
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_mean_wl_entry(ubi, &ubi->free);
	if (!e) {
		ubi_err(ubi, "no free eraseblocks");
		return NULL;
	}

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Move the physical eraseblock to the protection queue where it will
	 * be protected from being moved for some time.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
	dbg_wl("PEB %d EC %d", e->pnum, e->ec);

	return e;
}

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	e = ubi->lookuptbl[pnum];
	if (!e)
		return -ENODEV;

	if (self_check_in_pq(ubi, e))
		return -ENODEV;

	list_del(&e->u.list);
	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = self_check_ec(ubi, e->pnum, e->ec);
	if (err)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}
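
/*
 * Note on the arithmetic above: on success ubi_io_sync_erase() returns the
 * number of erasures actually performed (more than one when @torture is
 * set), so "ec += err" accumulates the real wear. For example, a PEB with
 * EC 41 becomes EC 42 in the header written back after a single successful
 * erase.
 */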

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from the
 * tail of the protection queue. These PEBs have been protected for long enough
 * and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e, *tmp;
	int count;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
repeat:
	count = 0;
	spin_lock(&ubi->wl_lock);
	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
		dbg_wl("PEB %d EC %d protection over, move to used tree",
			e->pnum, e->ec);

		list_del(&e->u.list);
		wl_tree_add(e, &ubi->used);
		if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
			 */
			spin_unlock(&ubi->wl_lock);
			cond_resched();
			goto repeat;
		}
	}

	ubi->pq_head += 1;
	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
		ubi->pq_head = 0;
	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
	spin_unlock(&ubi->wl_lock);
}

#ifdef __UBOOT__
void ubi_do_worker(struct ubi_device *ubi)
{
	int err;

	if (list_empty(&ubi->works) || ubi->ro_mode ||
	    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi))
		return;

	spin_lock(&ubi->wl_lock);
	while (!list_empty(&ubi->works)) {
		/*
		 * Call do_work(), which executes exactly one work from the
		 * queue, including removing it from the work queue.
		 */
		spin_unlock(&ubi->wl_lock);
		err = do_work(ubi);
		spin_lock(&ubi->wl_lock);
		if (err) {
			ubi_err(ubi, "%s: work failed with error code %d",
				ubi->bgt_name, err);
		}
	}
	spin_unlock(&ubi->wl_lock);
}
#endif
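
/*
 * Note: under __UBOOT__ there is no background thread, so queued works do
 * not run on their own. Callers such as schedule_erase() and
 * ensure_wear_leveling() below therefore invoke ubi_do_worker() right
 * after queueing, draining the work list synchronously in the caller's
 * context.
 */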

/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if ubi->work_sem is already held in read mode!
 */
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
#ifndef __UBOOT__
	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
		wake_up_process(ubi->bgt_thread);
#endif
	spin_unlock(&ubi->wl_lock);
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	down_read(&ubi->work_sem);
	__schedule_ubi_work(ubi, wrk);
	up_read(&ubi->work_sem);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int shutdown);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int vol_id, int lnum, int torture)
{
	struct ubi_work *wl_wrk;

	ubi_assert(e);

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);

#ifdef __UBOOT__
	ubi_do_worker(ubi);
#endif
	return 0;
}

/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 */
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			 int vol_id, int lnum, int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("sync erase of PEB %i", e->pnum);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	return erase_worker(ubi, wl_wrk, 0);
}

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 * because the WL-subsystem is shutting down
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int shutdown)
{
	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
	int vol_id = -1, lnum = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
	int anchor = wrk->anchor;
#endif
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	kfree(wrk);
	if (shutdown)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

#ifdef CONFIG_MTD_UBI_FASTMAP
	/* Check whether we need to produce an anchor PEB */
	if (!anchor)
		anchor = !anchor_pebs_avalible(&ubi->free);

	if (anchor) {
		e1 = find_anchor_wl_entry(&ubi->used);
		if (!e1)
			goto out_cancel;
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
	} else if (!ubi->scrub.rb_node) {
#else
	if (!ubi->scrub.rb_node) {
#endif
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);

			/* Give the unused PEB back */
			wl_tree_add(e2, &ubi->free);
			ubi->free_count++;
			goto out_cancel;
		}
		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->scrub);
		rb_erase(&e1->u.rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_FF) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes VID headers shortly after the PEB
			 * was given, so we have a situation when it has not
			 * yet had a chance to write it, because it was
			 * preempted. So add this PEB to the protection queue
			 * so far, because presumably more data will be written
			 * there (including the missing VID header), and then
			 * we'll move it.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			protect = 1;
			goto out_not_moved;
		} else if (err == UBI_IO_FF_BITFLIPS) {
			/*
			 * The same situation as %UBI_IO_FF, but bit-flips were
			 * detected. It is better to schedule this PEB for
			 * scrubbing.
			 */
			dbg_wl("PEB %d has no VID header but has bit-flips",
			       e1->pnum);
			scrubbing = 1;
			goto out_not_moved;
		}

		ubi_err(ubi, "error %d while reading VID header from PEB %d",
			err, e1->pnum);
		goto out_error;
	}

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {
		if (err == MOVE_CANCEL_RACE) {
			/*
			 * The LEB has not been moved because the volume is
			 * being deleted or the PEB has been put meanwhile. We
			 * should prevent this PEB from being selected for
			 * wear-leveling movement again, so put it to the
			 * protection queue.
			 */
			protect = 1;
			goto out_not_moved;
		}
		if (err == MOVE_RETRY) {
			scrubbing = 1;
			goto out_not_moved;
		}
		if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
		    err == MOVE_TARGET_RD_ERR) {
			/*
			 * Target PEB had bit-flips or write error - torture it.
			 */
			torture = 1;
			goto out_not_moved;
		}

		if (err == MOVE_SOURCE_RD_ERR) {
			/*
			 * An error happened while reading the source PEB. Do
			 * not switch to R/O mode in this case, and give the
			 * upper layers a possibility to recover from this,
			 * e.g. by unmapping the corresponding LEB. Instead,
			 * just put this PEB to the @ubi->erroneous list to
			 * prevent UBI from trying to move it over and over
			 * again.
			 */
			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
				ubi_err(ubi, "too many erroneous eraseblocks (%d)",
					ubi->erroneous_peb_count);
				goto out_error;
			}
			erroneous = 1;
			goto out_not_moved;
		}

		if (err < 0)
			goto out_error;

		ubi_assert(0);
	}

	/* The PEB has been successfully moved */
	if (scrubbing)
		ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
			e1->pnum, vol_id, lnum, e2->pnum);
	ubi_free_vid_hdr(ubi, vid_hdr);

	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put) {
		wl_tree_add(e2, &ubi->used);
		e2 = NULL;
	}
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
	if (err) {
		if (e2)
			wl_entry_destroy(ubi, e2);
		goto out_ro;
	}

	if (e2) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
		       e2->pnum, vol_id, lnum);
		err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
		if (err)
			goto out_ro;
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	return 0;

	/*
	 * For some reason the LEB was not moved; it might be an error, or
	 * something else. @e1 was not changed, so return it back. @e2 might
	 * have been changed, schedule it for erasure.
	 */
out_not_moved:
	if (vol_id != -1)
		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
		       e1->pnum, vol_id, lnum, e2->pnum, err);
	else
		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
		       e1->pnum, e2->pnum, err);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_queue_add(ubi, e1);
	else if (erroneous) {
		wl_tree_add(e1, &ubi->erroneous);
		ubi->erroneous_peb_count += 1;
	} else if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else
		wl_tree_add(e1, &ubi->used);
	ubi_assert(!ubi->move_to_put);
	ubi->move_from = ubi->move_to = NULL;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
	if (err)
		goto out_ro;

	mutex_unlock(&ubi->move_mutex);
	return 0;

out_error:
	if (vol_id != -1)
		ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
			err, e1->pnum, e2->pnum);
	else
		ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
			err, e1->pnum, vol_id, lnum, e2->pnum);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	wl_entry_destroy(ubi, e1);
	wl_entry_destroy(ubi, e2);

out_ro:
	ubi_ro_mode(ubi);
	mutex_unlock(&ubi->move_mutex);
	ubi_assert(err != 0);
	return err < 0 ? err : -EIO;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}
970c91a719dSKyungmin Park 
971c91a719dSKyungmin Park /**
972c91a719dSKyungmin Park  * ensure_wear_leveling - schedule wear-leveling if it is needed.
973c91a719dSKyungmin Park  * @ubi: UBI device description object
974ff94bc40SHeiko Schocher  * @nested: set to non-zero if this function is called from UBI worker
975c91a719dSKyungmin Park  *
976c91a719dSKyungmin Park  * This function checks if it is time to start wear-leveling and schedules it
977c91a719dSKyungmin Park  * if yes. This function returns zero in case of success and a negative error
978c91a719dSKyungmin Park  * code in case of failure.
979c91a719dSKyungmin Park  */
980ff94bc40SHeiko Schocher static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
981c91a719dSKyungmin Park {
982c91a719dSKyungmin Park 	int err = 0;
983c91a719dSKyungmin Park 	struct ubi_wl_entry *e1;
984c91a719dSKyungmin Park 	struct ubi_wl_entry *e2;
985c91a719dSKyungmin Park 	struct ubi_work *wrk;
986c91a719dSKyungmin Park 
987c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
988c91a719dSKyungmin Park 	if (ubi->wl_scheduled)
989c91a719dSKyungmin Park 		/* Wear-leveling is already in the work queue */
990c91a719dSKyungmin Park 		goto out_unlock;
991c91a719dSKyungmin Park 
992c91a719dSKyungmin Park 	/*
993c91a719dSKyungmin Park 	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
994c91a719dSKyungmin Park 	 * the WL worker has to be scheduled anyway.
995c91a719dSKyungmin Park 	 */
996c91a719dSKyungmin Park 	if (!ubi->scrub.rb_node) {
997c91a719dSKyungmin Park 		if (!ubi->used.rb_node || !ubi->free.rb_node)
998c91a719dSKyungmin Park 			/* No physical eraseblocks - no deal */
999c91a719dSKyungmin Park 			goto out_unlock;
1000c91a719dSKyungmin Park 
1001c91a719dSKyungmin Park 		/*
1002c91a719dSKyungmin Park 		 * We schedule wear-leveling only if the difference between the
1003c91a719dSKyungmin Park 		 * lowest erase counter of used physical eraseblocks and a high
1004ff94bc40SHeiko Schocher 		 * erase counter of free physical eraseblocks is greater than
1005c91a719dSKyungmin Park 		 * %UBI_WL_THRESHOLD.
1006c91a719dSKyungmin Park 		 */
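		/*
		 * Editorial example (illustrative numbers only, assuming the
		 * common default threshold of 4096): a used PEB with EC 100
		 * and a free PEB with EC 4200 differ by 4100, which is above
		 * the threshold, so wear-leveling is scheduled; with EC 100
		 * vs. EC 2000 the difference stays below the threshold and
		 * nothing is done.
		 */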
1007ff94bc40SHeiko Schocher 		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1008ff94bc40SHeiko Schocher 		e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1009c91a719dSKyungmin Park 
1010c91a719dSKyungmin Park 		if (e2->ec - e1->ec < UBI_WL_THRESHOLD)
1011c91a719dSKyungmin Park 			goto out_unlock;
1012c91a719dSKyungmin Park 		dbg_wl("schedule wear-leveling");
1013c91a719dSKyungmin Park 	} else
1014c91a719dSKyungmin Park 		dbg_wl("schedule scrubbing");
1015c91a719dSKyungmin Park 
1016c91a719dSKyungmin Park 	ubi->wl_scheduled = 1;
1017c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1018c91a719dSKyungmin Park 
1019c91a719dSKyungmin Park 	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1020c91a719dSKyungmin Park 	if (!wrk) {
1021c91a719dSKyungmin Park 		err = -ENOMEM;
1022c91a719dSKyungmin Park 		goto out_cancel;
1023c91a719dSKyungmin Park 	}
1024c91a719dSKyungmin Park 
1025ff94bc40SHeiko Schocher 	wrk->anchor = 0;
1026c91a719dSKyungmin Park 	wrk->func = &wear_leveling_worker;
1027ff94bc40SHeiko Schocher 	if (nested)
1028ff94bc40SHeiko Schocher 		__schedule_ubi_work(ubi, wrk);
1029f82290afSRichard Weinberger #ifndef __UBOOT__
1030ff94bc40SHeiko Schocher 	else
1031c91a719dSKyungmin Park 		schedule_ubi_work(ubi, wrk);
1032f82290afSRichard Weinberger #else
1033f82290afSRichard Weinberger 	else {
1034f82290afSRichard Weinberger 		schedule_ubi_work(ubi, wrk);
1035f82290afSRichard Weinberger 		ubi_do_worker(ubi);
1036f82290afSRichard Weinberger 	}
1037f82290afSRichard Weinberger #endif
1038c91a719dSKyungmin Park 	return err;
1039c91a719dSKyungmin Park 
1040c91a719dSKyungmin Park out_cancel:
1041c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
1042c91a719dSKyungmin Park 	ubi->wl_scheduled = 0;
1043c91a719dSKyungmin Park out_unlock:
1044c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1045c91a719dSKyungmin Park 	return err;
1046c91a719dSKyungmin Park }
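
/*
 * Editorial note on the @nested flag above (describing the code, not new
 * behaviour): a caller already running in worker context, such as
 * erase_worker(), passes nested=1 so the new work is only queued via
 * __schedule_ubi_work() and @ubi->work_sem is not taken a second time;
 * top-level callers such as ubi_wl_scrub_peb() pass nested=0. In U-Boot,
 * which has no background thread, the nested=0 path also runs the queued
 * work immediately through ubi_do_worker().
 */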
1047c91a719dSKyungmin Park 
1048c91a719dSKyungmin Park /**
1049c91a719dSKyungmin Park  * erase_worker - physical eraseblock erase worker function.
1050c91a719dSKyungmin Park  * @ubi: UBI device description object
1051c91a719dSKyungmin Park  * @wl_wrk: the work object
10520195a7bbSHeiko Schocher  * @shutdown: non-zero if the worker has to free memory and exit
10530195a7bbSHeiko Schocher  * because the WL sub-system is shutting down
1054c91a719dSKyungmin Park  *
1055c91a719dSKyungmin Park  * This function erases a physical eraseblock and performs torture testing if
1056c91a719dSKyungmin Park  * needed. It also takes care of marking the physical eraseblock bad if
1057c91a719dSKyungmin Park  * needed. Returns zero in case of success and a negative error code in case of
1058c91a719dSKyungmin Park  * failure.
1059c91a719dSKyungmin Park  */
1060c91a719dSKyungmin Park static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
10610195a7bbSHeiko Schocher 			int shutdown)
1062c91a719dSKyungmin Park {
1063c91a719dSKyungmin Park 	struct ubi_wl_entry *e = wl_wrk->e;
1064ff94bc40SHeiko Schocher 	int pnum = e->pnum;
1065ff94bc40SHeiko Schocher 	int vol_id = wl_wrk->vol_id;
1066ff94bc40SHeiko Schocher 	int lnum = wl_wrk->lnum;
1067ff94bc40SHeiko Schocher 	int err, available_consumed = 0;
1068c91a719dSKyungmin Park 
10690195a7bbSHeiko Schocher 	if (shutdown) {
1070c91a719dSKyungmin Park 		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1071c91a719dSKyungmin Park 		kfree(wl_wrk);
10720195a7bbSHeiko Schocher 		wl_entry_destroy(ubi, e);
1073c91a719dSKyungmin Park 		return 0;
1074c91a719dSKyungmin Park 	}
1075c91a719dSKyungmin Park 
1076ff94bc40SHeiko Schocher 	dbg_wl("erase PEB %d EC %d LEB %d:%d",
1077ff94bc40SHeiko Schocher 	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
1078ff94bc40SHeiko Schocher 
1079c91a719dSKyungmin Park 	err = sync_erase(ubi, e, wl_wrk->torture);
1080c91a719dSKyungmin Park 	if (!err) {
1081c91a719dSKyungmin Park 		/* Fine, we've erased it successfully */
1082c91a719dSKyungmin Park 		kfree(wl_wrk);
1083c91a719dSKyungmin Park 
1084c91a719dSKyungmin Park 		spin_lock(&ubi->wl_lock);
1085c91a719dSKyungmin Park 		wl_tree_add(e, &ubi->free);
1086ff94bc40SHeiko Schocher 		ubi->free_count++;
1087c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1088c91a719dSKyungmin Park 
1089c91a719dSKyungmin Park 		/*
1090ff94bc40SHeiko Schocher 		 * One more erase operation has happened; take care of the
1091ff94bc40SHeiko Schocher 		 * protected physical eraseblocks.
1092c91a719dSKyungmin Park 		 */
1093ff94bc40SHeiko Schocher 		serve_prot_queue(ubi);
1094c91a719dSKyungmin Park 
1095c91a719dSKyungmin Park 		/* And take care about wear-leveling */
1096ff94bc40SHeiko Schocher 		err = ensure_wear_leveling(ubi, 1);
1097c91a719dSKyungmin Park 		return err;
1098c91a719dSKyungmin Park 	}
1099c91a719dSKyungmin Park 
11000195a7bbSHeiko Schocher 	ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
1101c91a719dSKyungmin Park 	kfree(wl_wrk);
1102c91a719dSKyungmin Park 
1103c91a719dSKyungmin Park 	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1104c91a719dSKyungmin Park 	    err == -EBUSY) {
1105c91a719dSKyungmin Park 		int err1;
1106c91a719dSKyungmin Park 
1107c91a719dSKyungmin Park 		/* Re-schedule the LEB for erasure */
1108ff94bc40SHeiko Schocher 		err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
1109c91a719dSKyungmin Park 		if (err1) {
1110c91a719dSKyungmin Park 			err = err1;
1111c91a719dSKyungmin Park 			goto out_ro;
1112c91a719dSKyungmin Park 		}
1113c91a719dSKyungmin Park 		return err;
1114ff94bc40SHeiko Schocher 	}
1115ff94bc40SHeiko Schocher 
11160195a7bbSHeiko Schocher 	wl_entry_destroy(ubi, e);
1117ff94bc40SHeiko Schocher 	if (err != -EIO)
1118c91a719dSKyungmin Park 		/*
1119c91a719dSKyungmin Park 		 * If this is not %-EIO, we have no idea what to do. Scheduling
1120c91a719dSKyungmin Park 		 * this physical eraseblock for erasure again would cause
1121ff94bc40SHeiko Schocher 		 * errors again and again. Well, let's switch to R/O mode.
1122c91a719dSKyungmin Park 		 */
1123c91a719dSKyungmin Park 		goto out_ro;
1124c91a719dSKyungmin Park 
1125c91a719dSKyungmin Park 	/* It is %-EIO, the PEB went bad */
1126c91a719dSKyungmin Park 
1127c91a719dSKyungmin Park 	if (!ubi->bad_allowed) {
11280195a7bbSHeiko Schocher 		ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
1129c91a719dSKyungmin Park 		goto out_ro;
1130c91a719dSKyungmin Park 	}
1131c91a719dSKyungmin Park 
1132c91a719dSKyungmin Park 	spin_lock(&ubi->volumes_lock);
1133c91a719dSKyungmin Park 	if (ubi->beb_rsvd_pebs == 0) {
1134ff94bc40SHeiko Schocher 		if (ubi->avail_pebs == 0) {
1135c91a719dSKyungmin Park 			spin_unlock(&ubi->volumes_lock);
11360195a7bbSHeiko Schocher 			ubi_err(ubi, "no reserved/available physical eraseblocks");
1137c91a719dSKyungmin Park 			goto out_ro;
1138c91a719dSKyungmin Park 		}
1139ff94bc40SHeiko Schocher 		ubi->avail_pebs -= 1;
1140ff94bc40SHeiko Schocher 		available_consumed = 1;
1141ff94bc40SHeiko Schocher 	}
1142c91a719dSKyungmin Park 	spin_unlock(&ubi->volumes_lock);
1143c91a719dSKyungmin Park 
11440195a7bbSHeiko Schocher 	ubi_msg(ubi, "mark PEB %d as bad", pnum);
1145c91a719dSKyungmin Park 	err = ubi_io_mark_bad(ubi, pnum);
1146c91a719dSKyungmin Park 	if (err)
1147c91a719dSKyungmin Park 		goto out_ro;
1148c91a719dSKyungmin Park 
1149c91a719dSKyungmin Park 	spin_lock(&ubi->volumes_lock);
1150ff94bc40SHeiko Schocher 	if (ubi->beb_rsvd_pebs > 0) {
1151ff94bc40SHeiko Schocher 		if (available_consumed) {
1152ff94bc40SHeiko Schocher 			/*
1153ff94bc40SHeiko Schocher 			 * The amount of reserved PEBs increased since we last
1154ff94bc40SHeiko Schocher 			 * checked.
1155ff94bc40SHeiko Schocher 			 */
1156ff94bc40SHeiko Schocher 			ubi->avail_pebs += 1;
1157ff94bc40SHeiko Schocher 			available_consumed = 0;
1158ff94bc40SHeiko Schocher 		}
1159c91a719dSKyungmin Park 		ubi->beb_rsvd_pebs -= 1;
1160ff94bc40SHeiko Schocher 	}
1161c91a719dSKyungmin Park 	ubi->bad_peb_count += 1;
1162c91a719dSKyungmin Park 	ubi->good_peb_count -= 1;
1163c91a719dSKyungmin Park 	ubi_calculate_reserved(ubi);
1164ff94bc40SHeiko Schocher 	if (available_consumed)
11650195a7bbSHeiko Schocher 		ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
1166ff94bc40SHeiko Schocher 	else if (ubi->beb_rsvd_pebs)
11670195a7bbSHeiko Schocher 		ubi_msg(ubi, "%d PEBs left in the reserve",
11680195a7bbSHeiko Schocher 			ubi->beb_rsvd_pebs);
1169ff94bc40SHeiko Schocher 	else
11700195a7bbSHeiko Schocher 		ubi_warn(ubi, "last PEB from the reserve was used");
1171c91a719dSKyungmin Park 	spin_unlock(&ubi->volumes_lock);
1172c91a719dSKyungmin Park 
1173c91a719dSKyungmin Park 	return err;
1174c91a719dSKyungmin Park 
1175c91a719dSKyungmin Park out_ro:
1176ff94bc40SHeiko Schocher 	if (available_consumed) {
1177ff94bc40SHeiko Schocher 		spin_lock(&ubi->volumes_lock);
1178ff94bc40SHeiko Schocher 		ubi->avail_pebs += 1;
1179ff94bc40SHeiko Schocher 		spin_unlock(&ubi->volumes_lock);
1180ff94bc40SHeiko Schocher 	}
1181c91a719dSKyungmin Park 	ubi_ro_mode(ubi);
1182c91a719dSKyungmin Park 	return err;
1183c91a719dSKyungmin Park }
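
/*
 * Editorial summary of erase_worker() above: a successful erase puts the
 * PEB back on @ubi->free; transient errors (%-EINTR, %-ENOMEM, %-EAGAIN,
 * %-EBUSY) simply re-schedule the erasure; %-EIO means the PEB went bad
 * and it is marked as such, consuming one PEB from the reserved pool (or,
 * if that pool is empty, one of the available PEBs); any other error
 * switches the device to read-only mode.
 */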
1184c91a719dSKyungmin Park 
1185c91a719dSKyungmin Park /**
1186ff94bc40SHeiko Schocher  * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1187c91a719dSKyungmin Park  * @ubi: UBI device description object
1188ff94bc40SHeiko Schocher  * @vol_id: the volume ID that last used this PEB
1189ff94bc40SHeiko Schocher  * @lnum: the last used logical eraseblock number for the PEB
1190c91a719dSKyungmin Park  * @pnum: physical eraseblock to return
1191c91a719dSKyungmin Park  * @torture: if this physical eraseblock has to be tortured
1192c91a719dSKyungmin Park  *
1193c91a719dSKyungmin Park  * This function is called to return physical eraseblock @pnum to the pool of
1194c91a719dSKyungmin Park  * free physical eraseblocks. The @torture flag has to be set if an I/O error
1195c91a719dSKyungmin Park  * occurred to this @pnum and it has to be tested. This function returns zero
1196c91a719dSKyungmin Park  * in case of success, and a negative error code in case of failure.
1197c91a719dSKyungmin Park  */
1198ff94bc40SHeiko Schocher int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
1199ff94bc40SHeiko Schocher 		   int pnum, int torture)
1200c91a719dSKyungmin Park {
1201c91a719dSKyungmin Park 	int err;
1202c91a719dSKyungmin Park 	struct ubi_wl_entry *e;
1203c91a719dSKyungmin Park 
1204c91a719dSKyungmin Park 	dbg_wl("PEB %d", pnum);
1205c91a719dSKyungmin Park 	ubi_assert(pnum >= 0);
1206c91a719dSKyungmin Park 	ubi_assert(pnum < ubi->peb_count);
1207c91a719dSKyungmin Park 
12080195a7bbSHeiko Schocher 	down_read(&ubi->fm_protect);
12090195a7bbSHeiko Schocher 
1210c91a719dSKyungmin Park retry:
1211c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
1212c91a719dSKyungmin Park 	e = ubi->lookuptbl[pnum];
1213c91a719dSKyungmin Park 	if (e == ubi->move_from) {
1214c91a719dSKyungmin Park 		/*
1215c91a719dSKyungmin Park 		 * User is putting the physical eraseblock which was selected to
1216c91a719dSKyungmin Park 		 * be moved. It will be scheduled for erasure in the
1217c91a719dSKyungmin Park 		 * wear-leveling worker.
1218c91a719dSKyungmin Park 		 */
1219c91a719dSKyungmin Park 		dbg_wl("PEB %d is being moved, wait", pnum);
1220c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1221c91a719dSKyungmin Park 
1222c91a719dSKyungmin Park 		/* Wait for the WL worker by taking the @ubi->move_mutex */
1223c91a719dSKyungmin Park 		mutex_lock(&ubi->move_mutex);
1224c91a719dSKyungmin Park 		mutex_unlock(&ubi->move_mutex);
1225c91a719dSKyungmin Park 		goto retry;
1226c91a719dSKyungmin Park 	} else if (e == ubi->move_to) {
1227c91a719dSKyungmin Park 		/*
1228c91a719dSKyungmin Park 		 * User is putting the physical eraseblock which was selected
1229c91a719dSKyungmin Park 		 * as the target of a data move. It may happen if the EBA
1230ff94bc40SHeiko Schocher 		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
1231ff94bc40SHeiko Schocher 		 * but the WL sub-system has not yet put the PEB into the
1232ff94bc40SHeiko Schocher 		 * "used" tree, although it is about to do so. So we just set
1233ff94bc40SHeiko Schocher 		 * a flag which tells the WL worker that the PEB is not needed
1234ff94bc40SHeiko Schocher 		 * anymore and should be scheduled for erasure.
1235c91a719dSKyungmin Park 		 */
1236c91a719dSKyungmin Park 		dbg_wl("PEB %d is the target of data moving", pnum);
1237c91a719dSKyungmin Park 		ubi_assert(!ubi->move_to_put);
1238c91a719dSKyungmin Park 		ubi->move_to_put = 1;
1239c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
12400195a7bbSHeiko Schocher 		up_read(&ubi->fm_protect);
1241c91a719dSKyungmin Park 		return 0;
1242c91a719dSKyungmin Park 	} else {
1243c91a719dSKyungmin Park 		if (in_wl_tree(e, &ubi->used)) {
1244ff94bc40SHeiko Schocher 			self_check_in_wl_tree(ubi, e, &ubi->used);
1245ff94bc40SHeiko Schocher 			rb_erase(&e->u.rb, &ubi->used);
1246c91a719dSKyungmin Park 		} else if (in_wl_tree(e, &ubi->scrub)) {
1247ff94bc40SHeiko Schocher 			self_check_in_wl_tree(ubi, e, &ubi->scrub);
1248ff94bc40SHeiko Schocher 			rb_erase(&e->u.rb, &ubi->scrub);
1249ff94bc40SHeiko Schocher 		} else if (in_wl_tree(e, &ubi->erroneous)) {
1250ff94bc40SHeiko Schocher 			self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1251ff94bc40SHeiko Schocher 			rb_erase(&e->u.rb, &ubi->erroneous);
1252ff94bc40SHeiko Schocher 			ubi->erroneous_peb_count -= 1;
1253ff94bc40SHeiko Schocher 			ubi_assert(ubi->erroneous_peb_count >= 0);
1254ff94bc40SHeiko Schocher 			/* Erroneous PEBs should be tortured */
1255ff94bc40SHeiko Schocher 			torture = 1;
1256c91a719dSKyungmin Park 		} else {
1257ff94bc40SHeiko Schocher 			err = prot_queue_del(ubi, e->pnum);
1258c91a719dSKyungmin Park 			if (err) {
12590195a7bbSHeiko Schocher 				ubi_err(ubi, "PEB %d not found", pnum);
1260c91a719dSKyungmin Park 				ubi_ro_mode(ubi);
1261c91a719dSKyungmin Park 				spin_unlock(&ubi->wl_lock);
12620195a7bbSHeiko Schocher 				up_read(&ubi->fm_protect);
1263c91a719dSKyungmin Park 				return err;
1264c91a719dSKyungmin Park 			}
1265c91a719dSKyungmin Park 		}
1266c91a719dSKyungmin Park 	}
1267c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1268c91a719dSKyungmin Park 
1269ff94bc40SHeiko Schocher 	err = schedule_erase(ubi, e, vol_id, lnum, torture);
1270c91a719dSKyungmin Park 	if (err) {
1271c91a719dSKyungmin Park 		spin_lock(&ubi->wl_lock);
1272c91a719dSKyungmin Park 		wl_tree_add(e, &ubi->used);
1273c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1274c91a719dSKyungmin Park 	}
1275c91a719dSKyungmin Park 
12760195a7bbSHeiko Schocher 	up_read(&ubi->fm_protect);
1277c91a719dSKyungmin Park 	return err;
1278c91a719dSKyungmin Park }
1279c91a719dSKyungmin Park 
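/*
 * Editorial sketch (not part of the original driver): a typical caller
 * returns a PEB and may then force the queued erasure to run
 * synchronously. The function name and its parameters are placeholders.
 */
static int example_put_and_flush(struct ubi_device *ubi, int vol_id,
				 int lnum, int pnum)
{
	/* Hand the PEB back; this only schedules the erasure */
	int err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);

	if (err)
		return err;

	/* Execute the pending work for exactly this volume/LEB pair */
	return ubi_wl_flush(ubi, vol_id, lnum);
}
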
1280c91a719dSKyungmin Park /**
1281c91a719dSKyungmin Park  * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1282c91a719dSKyungmin Park  * @ubi: UBI device description object
1283c91a719dSKyungmin Park  * @pnum: the physical eraseblock to schedule
1284c91a719dSKyungmin Park  *
1285c91a719dSKyungmin Park  * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1286c91a719dSKyungmin Park  * needs scrubbing. This function schedules a physical eraseblock for
1287c91a719dSKyungmin Park  * scrubbing which is done in background. This function returns zero in case of
1288c91a719dSKyungmin Park  * success and a negative error code in case of failure.
1289c91a719dSKyungmin Park  */
1290c91a719dSKyungmin Park int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1291c91a719dSKyungmin Park {
1292c91a719dSKyungmin Park 	struct ubi_wl_entry *e;
1293c91a719dSKyungmin Park 
12940195a7bbSHeiko Schocher 	ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
1295c91a719dSKyungmin Park 
1296c91a719dSKyungmin Park retry:
1297c91a719dSKyungmin Park 	spin_lock(&ubi->wl_lock);
1298c91a719dSKyungmin Park 	e = ubi->lookuptbl[pnum];
1299ff94bc40SHeiko Schocher 	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1300ff94bc40SHeiko Schocher 				   in_wl_tree(e, &ubi->erroneous)) {
1301c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1302c91a719dSKyungmin Park 		return 0;
1303c91a719dSKyungmin Park 	}
1304c91a719dSKyungmin Park 
1305c91a719dSKyungmin Park 	if (e == ubi->move_to) {
1306c91a719dSKyungmin Park 		/*
1307c91a719dSKyungmin Park 		 * This physical eraseblock was the target of a data move.
1308c91a719dSKyungmin Park 		 * The data was moved, but the PEB was not yet inserted into
1309c91a719dSKyungmin Park 		 * the proper tree. We should just wait a little and let the
1310c91a719dSKyungmin Park 		 * WL worker proceed.
1311c91a719dSKyungmin Park 		 */
1312c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1313c91a719dSKyungmin Park 		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1314c91a719dSKyungmin Park 		yield();
1315c91a719dSKyungmin Park 		goto retry;
1316c91a719dSKyungmin Park 	}
1317c91a719dSKyungmin Park 
1318c91a719dSKyungmin Park 	if (in_wl_tree(e, &ubi->used)) {
1319ff94bc40SHeiko Schocher 		self_check_in_wl_tree(ubi, e, &ubi->used);
1320ff94bc40SHeiko Schocher 		rb_erase(&e->u.rb, &ubi->used);
1321c91a719dSKyungmin Park 	} else {
1322c91a719dSKyungmin Park 		int err;
1323c91a719dSKyungmin Park 
1324ff94bc40SHeiko Schocher 		err = prot_queue_del(ubi, e->pnum);
1325c91a719dSKyungmin Park 		if (err) {
13260195a7bbSHeiko Schocher 			ubi_err(ubi, "PEB %d not found", pnum);
1327c91a719dSKyungmin Park 			ubi_ro_mode(ubi);
1328c91a719dSKyungmin Park 			spin_unlock(&ubi->wl_lock);
1329c91a719dSKyungmin Park 			return err;
1330c91a719dSKyungmin Park 		}
1331c91a719dSKyungmin Park 	}
1332c91a719dSKyungmin Park 
1333c91a719dSKyungmin Park 	wl_tree_add(e, &ubi->scrub);
1334c91a719dSKyungmin Park 	spin_unlock(&ubi->wl_lock);
1335c91a719dSKyungmin Park 
1336c91a719dSKyungmin Park 	/*
1337c91a719dSKyungmin Park 	 * Technically scrubbing is the same as wear-leveling, so it is done
1338c91a719dSKyungmin Park 	 * by the WL worker.
1339c91a719dSKyungmin Park 	 */
1340ff94bc40SHeiko Schocher 	return ensure_wear_leveling(ubi, 0);
1341c91a719dSKyungmin Park }
1342c91a719dSKyungmin Park 
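/*
 * Editorial sketch (not part of the original driver): how a read path
 * reacts to a correctable bit-flip, roughly as the EBA sub-system does.
 * The function name and its parameters are placeholders.
 */
static int example_read(struct ubi_device *ubi, int pnum, void *buf, int len)
{
	int err = ubi_io_read_data(ubi, buf, pnum, 0, len);

	if (err == UBI_IO_BITFLIPS)
		/* The data was corrected; refresh this PEB in background */
		return ubi_wl_scrub_peb(ubi, pnum);

	return err;
}
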
1343c91a719dSKyungmin Park /**
1344c91a719dSKyungmin Park  * ubi_wl_flush - flush all pending works.
1345c91a719dSKyungmin Park  * @ubi: UBI device description object
1346ff94bc40SHeiko Schocher  * @vol_id: the volume id to flush for
1347ff94bc40SHeiko Schocher  * @lnum: the logical eraseblock number to flush for
1348c91a719dSKyungmin Park  *
1349ff94bc40SHeiko Schocher  * This function executes all pending works for a particular volume id /
1350ff94bc40SHeiko Schocher  * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
1351ff94bc40SHeiko Schocher  * acts as a wildcard for all of the corresponding volume numbers or logical
1352ff94bc40SHeiko Schocher  * eraseblock numbers. It returns zero in case of success and a negative error
1353ff94bc40SHeiko Schocher  * code in case of failure.
1354c91a719dSKyungmin Park  */
1355ff94bc40SHeiko Schocher int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1356c91a719dSKyungmin Park {
1357ff94bc40SHeiko Schocher 	int err = 0;
1358ff94bc40SHeiko Schocher 	int found = 1;
1359c91a719dSKyungmin Park 
1360c91a719dSKyungmin Park 	/*
1361ff94bc40SHeiko Schocher 	 * Run the pending works that match the given volume ID and LEB
1362c91a719dSKyungmin Park 	 * number, restarting the list scan after each executed work.
1363c91a719dSKyungmin Park 	 */
1364ff94bc40SHeiko Schocher 	dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1365ff94bc40SHeiko Schocher 	       vol_id, lnum, ubi->works_count);
1366ff94bc40SHeiko Schocher 
1367ff94bc40SHeiko Schocher 	while (found) {
13680195a7bbSHeiko Schocher 		struct ubi_work *wrk, *tmp;
1369ff94bc40SHeiko Schocher 		found = 0;
1370ff94bc40SHeiko Schocher 
1371ff94bc40SHeiko Schocher 		down_read(&ubi->work_sem);
1372ff94bc40SHeiko Schocher 		spin_lock(&ubi->wl_lock);
13730195a7bbSHeiko Schocher 		list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1374ff94bc40SHeiko Schocher 			if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1375ff94bc40SHeiko Schocher 			    (lnum == UBI_ALL || wrk->lnum == lnum)) {
1376ff94bc40SHeiko Schocher 				list_del(&wrk->list);
1377ff94bc40SHeiko Schocher 				ubi->works_count -= 1;
1378ff94bc40SHeiko Schocher 				ubi_assert(ubi->works_count >= 0);
1379ff94bc40SHeiko Schocher 				spin_unlock(&ubi->wl_lock);
1380ff94bc40SHeiko Schocher 
1381ff94bc40SHeiko Schocher 				err = wrk->func(ubi, wrk, 0);
1382ff94bc40SHeiko Schocher 				if (err) {
1383ff94bc40SHeiko Schocher 					up_read(&ubi->work_sem);
1384c91a719dSKyungmin Park 					return err;
1385c91a719dSKyungmin Park 				}
1386c91a719dSKyungmin Park 
1387ff94bc40SHeiko Schocher 				spin_lock(&ubi->wl_lock);
1388ff94bc40SHeiko Schocher 				found = 1;
1389ff94bc40SHeiko Schocher 				break;
1390ff94bc40SHeiko Schocher 			}
1391ff94bc40SHeiko Schocher 		}
1392ff94bc40SHeiko Schocher 		spin_unlock(&ubi->wl_lock);
1393ff94bc40SHeiko Schocher 		up_read(&ubi->work_sem);
1394ff94bc40SHeiko Schocher 	}
1395ff94bc40SHeiko Schocher 
1396c91a719dSKyungmin Park 	/*
1397c91a719dSKyungmin Park 	 * Make sure all the works which have been done in parallel are
1398c91a719dSKyungmin Park 	 * finished.
1399c91a719dSKyungmin Park 	 */
1400c91a719dSKyungmin Park 	down_write(&ubi->work_sem);
1401c91a719dSKyungmin Park 	up_write(&ubi->work_sem);
1402c91a719dSKyungmin Park 
1403c91a719dSKyungmin Park 	return err;
1404c91a719dSKyungmin Park }
1405c91a719dSKyungmin Park 
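/*
 * Editorial note: %UBI_ALL may be passed for either argument, so
 * ubi_wl_flush(ubi, UBI_ALL, UBI_ALL) drains every pending work item,
 * which is how the whole sub-system is flushed.
 */
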
1406c91a719dSKyungmin Park /**
1407c91a719dSKyungmin Park  * tree_destroy - destroy an RB-tree.
14080195a7bbSHeiko Schocher  * @ubi: UBI device description object
1409c91a719dSKyungmin Park  * @root: the root of the tree to destroy
1410c91a719dSKyungmin Park  */
14110195a7bbSHeiko Schocher static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
1412c91a719dSKyungmin Park {
1413c91a719dSKyungmin Park 	struct rb_node *rb;
1414c91a719dSKyungmin Park 	struct ubi_wl_entry *e;
1415c91a719dSKyungmin Park 
1416c91a719dSKyungmin Park 	rb = root->rb_node;
1417c91a719dSKyungmin Park 	while (rb) {
1418c91a719dSKyungmin Park 		if (rb->rb_left)
1419c91a719dSKyungmin Park 			rb = rb->rb_left;
1420c91a719dSKyungmin Park 		else if (rb->rb_right)
1421c91a719dSKyungmin Park 			rb = rb->rb_right;
1422c91a719dSKyungmin Park 		else {
1423ff94bc40SHeiko Schocher 			e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1424c91a719dSKyungmin Park 
1425c91a719dSKyungmin Park 			rb = rb_parent(rb);
1426c91a719dSKyungmin Park 			if (rb) {
1427ff94bc40SHeiko Schocher 				if (rb->rb_left == &e->u.rb)
1428c91a719dSKyungmin Park 					rb->rb_left = NULL;
1429c91a719dSKyungmin Park 				else
1430c91a719dSKyungmin Park 					rb->rb_right = NULL;
1431c91a719dSKyungmin Park 			}
1432c91a719dSKyungmin Park 
14330195a7bbSHeiko Schocher 			wl_entry_destroy(ubi, e);
1434c91a719dSKyungmin Park 		}
1435c91a719dSKyungmin Park 	}
1436c91a719dSKyungmin Park }
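
/*
 * Editorial note on the traversal above: this is an iterative post-order
 * walk. Both children of a node are freed before the node itself, and the
 * parent's child pointer is set to %NULL so that each node is visited
 * exactly once, without recursion and without rebalancing a tree that is
 * being torn down anyway.
 */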
1437c91a719dSKyungmin Park 
1438c91a719dSKyungmin Park /**
1439c91a719dSKyungmin Park  * ubi_thread - UBI background thread.
1440c91a719dSKyungmin Park  * @u: the UBI device description object pointer
1441c91a719dSKyungmin Park  */
1442c91a719dSKyungmin Park int ubi_thread(void *u)
1443c91a719dSKyungmin Park {
1444c91a719dSKyungmin Park 	int failures = 0;
1445c91a719dSKyungmin Park 	struct ubi_device *ubi = u;
1446c91a719dSKyungmin Park 
14470195a7bbSHeiko Schocher 	ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1448c91a719dSKyungmin Park 		ubi->bgt_name, task_pid_nr(current));
1449c91a719dSKyungmin Park 
1450c91a719dSKyungmin Park 	set_freezable();
1451c91a719dSKyungmin Park 	for (;;) {
1452c91a719dSKyungmin Park 		int err;
1453c91a719dSKyungmin Park 
1454c91a719dSKyungmin Park 		if (kthread_should_stop())
1455c91a719dSKyungmin Park 			break;
1456c91a719dSKyungmin Park 
1457c91a719dSKyungmin Park 		if (try_to_freeze())
1458c91a719dSKyungmin Park 			continue;
1459c91a719dSKyungmin Park 
1460c91a719dSKyungmin Park 		spin_lock(&ubi->wl_lock);
1461c91a719dSKyungmin Park 		if (list_empty(&ubi->works) || ubi->ro_mode ||
1462ff94bc40SHeiko Schocher 		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1463c91a719dSKyungmin Park 			set_current_state(TASK_INTERRUPTIBLE);
1464c91a719dSKyungmin Park 			spin_unlock(&ubi->wl_lock);
1465c91a719dSKyungmin Park 			schedule();
1466c91a719dSKyungmin Park 			continue;
1467c91a719dSKyungmin Park 		}
1468c91a719dSKyungmin Park 		spin_unlock(&ubi->wl_lock);
1469c91a719dSKyungmin Park 
1470c91a719dSKyungmin Park 		err = do_work(ubi);
1471c91a719dSKyungmin Park 		if (err) {
14720195a7bbSHeiko Schocher 			ubi_err(ubi, "%s: work failed with error code %d",
1473c91a719dSKyungmin Park 				ubi->bgt_name, err);
1474c91a719dSKyungmin Park 			if (failures++ > WL_MAX_FAILURES) {
1475c91a719dSKyungmin Park 				/*
1476c91a719dSKyungmin Park 				 * Too many failures, disable the thread and
1477c91a719dSKyungmin Park 				 * switch to read-only mode.
1478c91a719dSKyungmin Park 				 */
14790195a7bbSHeiko Schocher 				ubi_msg(ubi, "%s: %d consecutive failures",
1480c91a719dSKyungmin Park 					ubi->bgt_name, WL_MAX_FAILURES);
1481c91a719dSKyungmin Park 				ubi_ro_mode(ubi);
1482ff94bc40SHeiko Schocher 				ubi->thread_enabled = 0;
1483ff94bc40SHeiko Schocher 				continue;
1484c91a719dSKyungmin Park 			}
1485c91a719dSKyungmin Park 		} else
1486c91a719dSKyungmin Park 			failures = 0;
1487c91a719dSKyungmin Park 
1488c91a719dSKyungmin Park 		cond_resched();
1489c91a719dSKyungmin Park 	}
1490c91a719dSKyungmin Park 
1491c91a719dSKyungmin Park 	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1492c91a719dSKyungmin Park 	return 0;
1493c91a719dSKyungmin Park }
1494c91a719dSKyungmin Park 
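/*
 * Editorial note (assumption based on the Linux driver): ubi_thread() is
 * not started here; it is created during device attach, along the lines
 * of kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name) in build.c, and
 * is woken whenever new work is queued.
 */
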
1495c91a719dSKyungmin Park /**
14960195a7bbSHeiko Schocher  * shutdown_work - shut down all pending works.
1497c91a719dSKyungmin Park  * @ubi: UBI device description object
1498c91a719dSKyungmin Park  */
14990195a7bbSHeiko Schocher static void shutdown_work(struct ubi_device *ubi)
1500c91a719dSKyungmin Park {
15010195a7bbSHeiko Schocher #ifdef CONFIG_MTD_UBI_FASTMAP
15020195a7bbSHeiko Schocher #ifndef __UBOOT__
15030195a7bbSHeiko Schocher 	flush_work(&ubi->fm_work);
15040195a7bbSHeiko Schocher #else
15050195a7bbSHeiko Schocher 	/* In U-Boot all work is executed synchronously, so nothing to flush */
15060195a7bbSHeiko Schocher #endif
15070195a7bbSHeiko Schocher #endif
1508c91a719dSKyungmin Park 	while (!list_empty(&ubi->works)) {
1509c91a719dSKyungmin Park 		struct ubi_work *wrk;
1510c91a719dSKyungmin Park 
1511c91a719dSKyungmin Park 		wrk = list_entry(ubi->works.next, struct ubi_work, list);
1512c91a719dSKyungmin Park 		list_del(&wrk->list);
1513c91a719dSKyungmin Park 		wrk->func(ubi, wrk, 1);
1514c91a719dSKyungmin Park 		ubi->works_count -= 1;
1515c91a719dSKyungmin Park 		ubi_assert(ubi->works_count >= 0);
1516c91a719dSKyungmin Park 	}
1517c91a719dSKyungmin Park }
1518c91a719dSKyungmin Park 
1519c91a719dSKyungmin Park /**
1520ff94bc40SHeiko Schocher  * ubi_wl_init - initialize the WL sub-system using attaching information.
1521c91a719dSKyungmin Park  * @ubi: UBI device description object
1522ff94bc40SHeiko Schocher  * @ai: attaching information
1523c91a719dSKyungmin Park  *
1524c91a719dSKyungmin Park  * This function returns zero in case of success, and a negative error code in
1525c91a719dSKyungmin Park  * case of failure.
1526c91a719dSKyungmin Park  */
1527ff94bc40SHeiko Schocher int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1528c91a719dSKyungmin Park {
1529ff94bc40SHeiko Schocher 	int err, i, reserved_pebs, found_pebs = 0;
1530c91a719dSKyungmin Park 	struct rb_node *rb1, *rb2;
1531ff94bc40SHeiko Schocher 	struct ubi_ainf_volume *av;
1532ff94bc40SHeiko Schocher 	struct ubi_ainf_peb *aeb, *tmp;
1533c91a719dSKyungmin Park 	struct ubi_wl_entry *e;
1534c91a719dSKyungmin Park 
1535ff94bc40SHeiko Schocher 	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1536c91a719dSKyungmin Park 	spin_lock_init(&ubi->wl_lock);
1537c91a719dSKyungmin Park 	mutex_init(&ubi->move_mutex);
1538c91a719dSKyungmin Park 	init_rwsem(&ubi->work_sem);
1539ff94bc40SHeiko Schocher 	ubi->max_ec = ai->max_ec;
1540c91a719dSKyungmin Park 	INIT_LIST_HEAD(&ubi->works);
1541c91a719dSKyungmin Park 
1542c91a719dSKyungmin Park 	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1543c91a719dSKyungmin Park 
1544c91a719dSKyungmin Park 	err = -ENOMEM;
1545c91a719dSKyungmin Park 	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1546c91a719dSKyungmin Park 	if (!ubi->lookuptbl)
1547c91a719dSKyungmin Park 		return err;
1548c91a719dSKyungmin Park 
1549ff94bc40SHeiko Schocher 	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1550ff94bc40SHeiko Schocher 		INIT_LIST_HEAD(&ubi->pq[i]);
1551ff94bc40SHeiko Schocher 	ubi->pq_head = 0;
1552ff94bc40SHeiko Schocher 
155368fc4490SHeiko Schocher 	ubi->free_count = 0;
1554ff94bc40SHeiko Schocher 	list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1555c91a719dSKyungmin Park 		cond_resched();
1556c91a719dSKyungmin Park 
1557c91a719dSKyungmin Park 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1558c91a719dSKyungmin Park 		if (!e)
1559c91a719dSKyungmin Park 			goto out_free;
1560c91a719dSKyungmin Park 
1561ff94bc40SHeiko Schocher 		e->pnum = aeb->pnum;
1562ff94bc40SHeiko Schocher 		e->ec = aeb->ec;
1563c91a719dSKyungmin Park 		ubi->lookuptbl[e->pnum] = e;
1564ff94bc40SHeiko Schocher 		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
15650195a7bbSHeiko Schocher 			wl_entry_destroy(ubi, e);
1566c91a719dSKyungmin Park 			goto out_free;
1567c91a719dSKyungmin Park 		}
1568ff94bc40SHeiko Schocher 
1569ff94bc40SHeiko Schocher 		found_pebs++;
1570c91a719dSKyungmin Park 	}
1571c91a719dSKyungmin Park 
1572ff94bc40SHeiko Schocher 	list_for_each_entry(aeb, &ai->free, u.list) {
1573c91a719dSKyungmin Park 		cond_resched();
1574c91a719dSKyungmin Park 
1575c91a719dSKyungmin Park 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1576c91a719dSKyungmin Park 		if (!e)
1577c91a719dSKyungmin Park 			goto out_free;
1578c91a719dSKyungmin Park 
1579ff94bc40SHeiko Schocher 		e->pnum = aeb->pnum;
1580ff94bc40SHeiko Schocher 		e->ec = aeb->ec;
1581c91a719dSKyungmin Park 		ubi_assert(e->ec >= 0);
1582ff94bc40SHeiko Schocher 
1583c91a719dSKyungmin Park 		wl_tree_add(e, &ubi->free);
1584ff94bc40SHeiko Schocher 		ubi->free_count++;
1585ff94bc40SHeiko Schocher 
1586c91a719dSKyungmin Park 		ubi->lookuptbl[e->pnum] = e;
1587ff94bc40SHeiko Schocher 
1588ff94bc40SHeiko Schocher 		found_pebs++;
1589c91a719dSKyungmin Park 	}
1590c91a719dSKyungmin Park 
1591ff94bc40SHeiko Schocher 	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1592ff94bc40SHeiko Schocher 		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1593c91a719dSKyungmin Park 			cond_resched();
1594c91a719dSKyungmin Park 
1595c91a719dSKyungmin Park 			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1596c91a719dSKyungmin Park 			if (!e)
1597c91a719dSKyungmin Park 				goto out_free;
1598c91a719dSKyungmin Park 
1599ff94bc40SHeiko Schocher 			e->pnum = aeb->pnum;
1600ff94bc40SHeiko Schocher 			e->ec = aeb->ec;
1601c91a719dSKyungmin Park 			ubi->lookuptbl[e->pnum] = e;
1602c91a719dSKyungmin Park 
1603ff94bc40SHeiko Schocher 			if (!aeb->scrub) {
1604c91a719dSKyungmin Park 				dbg_wl("add PEB %d EC %d to the used tree",
1605c91a719dSKyungmin Park 				       e->pnum, e->ec);
1606c91a719dSKyungmin Park 				wl_tree_add(e, &ubi->used);
1607c91a719dSKyungmin Park 			} else {
1608c91a719dSKyungmin Park 				dbg_wl("add PEB %d EC %d to the scrub tree",
1609c91a719dSKyungmin Park 				       e->pnum, e->ec);
1610c91a719dSKyungmin Park 				wl_tree_add(e, &ubi->scrub);
1611c91a719dSKyungmin Park 			}
1612ff94bc40SHeiko Schocher 
1613ff94bc40SHeiko Schocher 			found_pebs++;
1614c91a719dSKyungmin Park 		}
1615c91a719dSKyungmin Park 	}
1616c91a719dSKyungmin Park 
1617ff94bc40SHeiko Schocher 	dbg_wl("found %i PEBs", found_pebs);
1618ff94bc40SHeiko Schocher 
16190195a7bbSHeiko Schocher 	if (ubi->fm) {
16200195a7bbSHeiko Schocher 		ubi_assert(ubi->good_peb_count ==
1621ff94bc40SHeiko Schocher 			   found_pebs + ubi->fm->used_blocks);
16220195a7bbSHeiko Schocher 
16230195a7bbSHeiko Schocher 		for (i = 0; i < ubi->fm->used_blocks; i++) {
16240195a7bbSHeiko Schocher 			e = ubi->fm->e[i];
16250195a7bbSHeiko Schocher 			ubi->lookuptbl[e->pnum] = e;
16260195a7bbSHeiko Schocher 		}
16270195a7bbSHeiko Schocher 	} else
1629ff94bc40SHeiko Schocher 		ubi_assert(ubi->good_peb_count == found_pebs);
1630ff94bc40SHeiko Schocher 
1631ff94bc40SHeiko Schocher 	reserved_pebs = WL_RESERVED_PEBS;
16320195a7bbSHeiko Schocher 	ubi_fastmap_init(ubi, &reserved_pebs);
1633ff94bc40SHeiko Schocher 
1634ff94bc40SHeiko Schocher 	if (ubi->avail_pebs < reserved_pebs) {
16350195a7bbSHeiko Schocher 		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
1636ff94bc40SHeiko Schocher 			ubi->avail_pebs, reserved_pebs);
1637ff94bc40SHeiko Schocher 		if (ubi->corr_peb_count)
16380195a7bbSHeiko Schocher 			ubi_err(ubi, "%d PEBs are corrupted and not used",
1639ff94bc40SHeiko Schocher 				ubi->corr_peb_count);
1640c91a719dSKyungmin Park 		goto out_free;
1641c91a719dSKyungmin Park 	}
1642ff94bc40SHeiko Schocher 	ubi->avail_pebs -= reserved_pebs;
1643ff94bc40SHeiko Schocher 	ubi->rsvd_pebs += reserved_pebs;
1644c91a719dSKyungmin Park 
1645c91a719dSKyungmin Park 	/* Schedule wear-leveling if needed */
1646ff94bc40SHeiko Schocher 	err = ensure_wear_leveling(ubi, 0);
1647c91a719dSKyungmin Park 	if (err)
1648c91a719dSKyungmin Park 		goto out_free;
1649c91a719dSKyungmin Park 
1650c91a719dSKyungmin Park 	return 0;
1651c91a719dSKyungmin Park 
1652c91a719dSKyungmin Park out_free:
16530195a7bbSHeiko Schocher 	shutdown_work(ubi);
16540195a7bbSHeiko Schocher 	tree_destroy(ubi, &ubi->used);
16550195a7bbSHeiko Schocher 	tree_destroy(ubi, &ubi->free);
16560195a7bbSHeiko Schocher 	tree_destroy(ubi, &ubi->scrub);
1657c91a719dSKyungmin Park 	kfree(ubi->lookuptbl);
1658c91a719dSKyungmin Park 	return err;
1659c91a719dSKyungmin Park }
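
/*
 * Editorial summary of ubi_wl_init() above: wear-leveling entries are
 * populated from three sources in the attach info - PEBs queued for
 * erasure (@ai->erase), free PEBs (@ai->free), and per-volume PEBs, which
 * land in the used or scrub tree depending on the @scrub flag - before
 * fastmap entries are hooked into the lookup table and the reserved pool
 * is accounted for.
 */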
1660c91a719dSKyungmin Park 
1661c91a719dSKyungmin Park /**
1662ff94bc40SHeiko Schocher  * protection_queue_destroy - destroy the protection queue.
1663c91a719dSKyungmin Park  * @ubi: UBI device description object
1664c91a719dSKyungmin Park  */
1665ff94bc40SHeiko Schocher static void protection_queue_destroy(struct ubi_device *ubi)
1666c91a719dSKyungmin Park {
1667ff94bc40SHeiko Schocher 	int i;
1668ff94bc40SHeiko Schocher 	struct ubi_wl_entry *e, *tmp;
1669c91a719dSKyungmin Park 
1670ff94bc40SHeiko Schocher 	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1671ff94bc40SHeiko Schocher 		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1672ff94bc40SHeiko Schocher 			list_del(&e->u.list);
16730195a7bbSHeiko Schocher 			wl_entry_destroy(ubi, e);
1674c91a719dSKyungmin Park 		}
1675c91a719dSKyungmin Park 	}
1676c91a719dSKyungmin Park }
1677c91a719dSKyungmin Park 
1678c91a719dSKyungmin Park /**
1679ff94bc40SHeiko Schocher  * ubi_wl_close - close the wear-leveling sub-system.
1680c91a719dSKyungmin Park  * @ubi: UBI device description object
1681c91a719dSKyungmin Park  */
1682c91a719dSKyungmin Park void ubi_wl_close(struct ubi_device *ubi)
1683c91a719dSKyungmin Park {
1684ff94bc40SHeiko Schocher 	dbg_wl("close the WL sub-system");
16850195a7bbSHeiko Schocher 	ubi_fastmap_close(ubi);
16860195a7bbSHeiko Schocher 	shutdown_work(ubi);
1687ff94bc40SHeiko Schocher 	protection_queue_destroy(ubi);
16880195a7bbSHeiko Schocher 	tree_destroy(ubi, &ubi->used);
16890195a7bbSHeiko Schocher 	tree_destroy(ubi, &ubi->erroneous);
16900195a7bbSHeiko Schocher 	tree_destroy(ubi, &ubi->free);
16910195a7bbSHeiko Schocher 	tree_destroy(ubi, &ubi->scrub);
1692c91a719dSKyungmin Park 	kfree(ubi->lookuptbl);
1693c91a719dSKyungmin Park }
1694c91a719dSKyungmin Park 
1695c91a719dSKyungmin Park /**
1696ff94bc40SHeiko Schocher  * self_check_ec - make sure that the erase counter of a PEB is correct.
1697c91a719dSKyungmin Park  * @ubi: UBI device description object
1698c91a719dSKyungmin Park  * @pnum: the physical eraseblock number to check
1699c91a719dSKyungmin Park  * @ec: the erase counter to check
1700c91a719dSKyungmin Park  *
1701c91a719dSKyungmin Park  * This function returns zero if the erase counter of physical eraseblock @pnum
1702ff94bc40SHeiko Schocher  * is equivalent to @ec, and a non-zero value if not or if an error
1703c91a719dSKyungmin Park  * occurred.
1704c91a719dSKyungmin Park  */
1705ff94bc40SHeiko Schocher static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
1706c91a719dSKyungmin Park {
1707c91a719dSKyungmin Park 	int err;
1708c91a719dSKyungmin Park 	long long read_ec;
1709c91a719dSKyungmin Park 	struct ubi_ec_hdr *ec_hdr;
1710c91a719dSKyungmin Park 
1711ff94bc40SHeiko Schocher 	if (!ubi_dbg_chk_gen(ubi))
1712ff94bc40SHeiko Schocher 		return 0;
1713ff94bc40SHeiko Schocher 
1714c91a719dSKyungmin Park 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1715c91a719dSKyungmin Park 	if (!ec_hdr)
1716c91a719dSKyungmin Park 		return -ENOMEM;
1717c91a719dSKyungmin Park 
1718c91a719dSKyungmin Park 	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1719c91a719dSKyungmin Park 	if (err && err != UBI_IO_BITFLIPS) {
1720c91a719dSKyungmin Park 		/* The header does not have to exist */
1721c91a719dSKyungmin Park 		err = 0;
1722c91a719dSKyungmin Park 		goto out_free;
1723c91a719dSKyungmin Park 	}
1724c91a719dSKyungmin Park 
1725c91a719dSKyungmin Park 	read_ec = be64_to_cpu(ec_hdr->ec);
1726ff94bc40SHeiko Schocher 	if (ec != read_ec && read_ec - ec > 1) {
17270195a7bbSHeiko Schocher 		ubi_err(ubi, "self-check failed for PEB %d", pnum);
17280195a7bbSHeiko Schocher 		ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
1729ff94bc40SHeiko Schocher 		dump_stack();
1730c91a719dSKyungmin Park 		err = 1;
1731c91a719dSKyungmin Park 	} else
1732c91a719dSKyungmin Park 		err = 0;
1733c91a719dSKyungmin Park 
1734c91a719dSKyungmin Park out_free:
1735c91a719dSKyungmin Park 	kfree(ec_hdr);
1736c91a719dSKyungmin Park 	return err;
1737c91a719dSKyungmin Park }
1738c91a719dSKyungmin Park 
1739c91a719dSKyungmin Park /**
1740ff94bc40SHeiko Schocher  * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
1741ff94bc40SHeiko Schocher  * @ubi: UBI device description object
1742c91a719dSKyungmin Park  * @e: the wear-leveling entry to check
1743c91a719dSKyungmin Park  * @root: the root of the tree
1744c91a719dSKyungmin Park  *
1745ff94bc40SHeiko Schocher  * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
1746c91a719dSKyungmin Park  * is not.
1747c91a719dSKyungmin Park  */
1748ff94bc40SHeiko Schocher static int self_check_in_wl_tree(const struct ubi_device *ubi,
1749ff94bc40SHeiko Schocher 				 struct ubi_wl_entry *e, struct rb_root *root)
1750c91a719dSKyungmin Park {
1751ff94bc40SHeiko Schocher 	if (!ubi_dbg_chk_gen(ubi))
1752ff94bc40SHeiko Schocher 		return 0;
1753ff94bc40SHeiko Schocher 
1754c91a719dSKyungmin Park 	if (in_wl_tree(e, root))
1755c91a719dSKyungmin Park 		return 0;
1756c91a719dSKyungmin Park 
17570195a7bbSHeiko Schocher 	ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
1758c91a719dSKyungmin Park 		e->pnum, e->ec, root);
1759ff94bc40SHeiko Schocher 	dump_stack();
1760ff94bc40SHeiko Schocher 	return -EINVAL;
1761c91a719dSKyungmin Park }
1762c91a719dSKyungmin Park 
1763ff94bc40SHeiko Schocher /**
1764ff94bc40SHeiko Schocher  * self_check_in_pq - check if a wear-leveling entry is in the protection queue.
1766ff94bc40SHeiko Schocher  * @ubi: UBI device description object
1767ff94bc40SHeiko Schocher  * @e: the wear-leveling entry to check
1768ff94bc40SHeiko Schocher  *
1769ff94bc40SHeiko Schocher  * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
1770ff94bc40SHeiko Schocher  */
1771ff94bc40SHeiko Schocher static int self_check_in_pq(const struct ubi_device *ubi,
1772ff94bc40SHeiko Schocher 			    struct ubi_wl_entry *e)
1773ff94bc40SHeiko Schocher {
1774ff94bc40SHeiko Schocher 	struct ubi_wl_entry *p;
1775ff94bc40SHeiko Schocher 	int i;
1776ff94bc40SHeiko Schocher 
1777ff94bc40SHeiko Schocher 	if (!ubi_dbg_chk_gen(ubi))
1778ff94bc40SHeiko Schocher 		return 0;
1779ff94bc40SHeiko Schocher 
1780ff94bc40SHeiko Schocher 	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
1781ff94bc40SHeiko Schocher 		list_for_each_entry(p, &ubi->pq[i], u.list)
1782ff94bc40SHeiko Schocher 			if (p == e)
1783ff94bc40SHeiko Schocher 				return 0;
1784ff94bc40SHeiko Schocher 
17850195a7bbSHeiko Schocher 	ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
1786ff94bc40SHeiko Schocher 		e->pnum, e->ec);
1787ff94bc40SHeiko Schocher 	dump_stack();
1788ff94bc40SHeiko Schocher 	return -EINVAL;
1789ff94bc40SHeiko Schocher }
17900195a7bbSHeiko Schocher #ifndef CONFIG_MTD_UBI_FASTMAP
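/*
 * Editorial note: get_peb_for_wl() below takes the best candidate off
 * @ubi->free - a free entry whose erase counter stays within
 * %WL_FREE_MAX_DIFF of the smallest one - and updates the free count
 * accordingly.
 */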
17910195a7bbSHeiko Schocher static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
17920195a7bbSHeiko Schocher {
17930195a7bbSHeiko Schocher 	struct ubi_wl_entry *e;
17940195a7bbSHeiko Schocher 
17950195a7bbSHeiko Schocher 	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
17960195a7bbSHeiko Schocher 	self_check_in_wl_tree(ubi, e, &ubi->free);
17970195a7bbSHeiko Schocher 	ubi->free_count--;
17980195a7bbSHeiko Schocher 	ubi_assert(ubi->free_count >= 0);
17990195a7bbSHeiko Schocher 	rb_erase(&e->u.rb, &ubi->free);
18000195a7bbSHeiko Schocher 
18010195a7bbSHeiko Schocher 	return e;
18020195a7bbSHeiko Schocher }
18030195a7bbSHeiko Schocher 
18040195a7bbSHeiko Schocher /**
18050195a7bbSHeiko Schocher  * produce_free_peb - produce a free physical eraseblock.
18060195a7bbSHeiko Schocher  * @ubi: UBI device description object
18070195a7bbSHeiko Schocher  *
18080195a7bbSHeiko Schocher  * This function tries to make a free PEB by means of synchronous execution of
18090195a7bbSHeiko Schocher  * pending works. This may be needed if, for example, the background thread is
18100195a7bbSHeiko Schocher  * disabled. Returns zero in case of success and a negative error code in case
18110195a7bbSHeiko Schocher  * of failure.
18120195a7bbSHeiko Schocher  */
18130195a7bbSHeiko Schocher static int produce_free_peb(struct ubi_device *ubi)
18140195a7bbSHeiko Schocher {
18150195a7bbSHeiko Schocher 	int err;
18160195a7bbSHeiko Schocher 
18170195a7bbSHeiko Schocher 	while (!ubi->free.rb_node && ubi->works_count) {
18180195a7bbSHeiko Schocher 		spin_unlock(&ubi->wl_lock);
18190195a7bbSHeiko Schocher 
18200195a7bbSHeiko Schocher 		dbg_wl("do one work synchronously");
18210195a7bbSHeiko Schocher 		err = do_work(ubi);
18220195a7bbSHeiko Schocher 
18230195a7bbSHeiko Schocher 		spin_lock(&ubi->wl_lock);
18240195a7bbSHeiko Schocher 		if (err)
18250195a7bbSHeiko Schocher 			return err;
18260195a7bbSHeiko Schocher 	}
18270195a7bbSHeiko Schocher 
18280195a7bbSHeiko Schocher 	return 0;
18290195a7bbSHeiko Schocher }
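
/*
 * Editorial note on the locking above: produce_free_peb() is entered and
 * left with @ubi->wl_lock held; the lock is dropped only around do_work(),
 * because executing a work item may sleep and may itself need the lock.
 */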
18300195a7bbSHeiko Schocher 
18310195a7bbSHeiko Schocher /**
18320195a7bbSHeiko Schocher  * ubi_wl_get_peb - get a physical eraseblock.
18330195a7bbSHeiko Schocher  * @ubi: UBI device description object
18340195a7bbSHeiko Schocher  *
18350195a7bbSHeiko Schocher  * This function returns a physical eraseblock in case of success and a
18360195a7bbSHeiko Schocher  * negative error code in case of failure.
18370195a7bbSHeiko Schocher  * Returns with ubi->fm_eba_sem held in read mode!
18380195a7bbSHeiko Schocher  */
18390195a7bbSHeiko Schocher int ubi_wl_get_peb(struct ubi_device *ubi)
18400195a7bbSHeiko Schocher {
18410195a7bbSHeiko Schocher 	int err;
18420195a7bbSHeiko Schocher 	struct ubi_wl_entry *e;
18430195a7bbSHeiko Schocher 
18440195a7bbSHeiko Schocher retry:
18450195a7bbSHeiko Schocher 	down_read(&ubi->fm_eba_sem);
18460195a7bbSHeiko Schocher 	spin_lock(&ubi->wl_lock);
18470195a7bbSHeiko Schocher 	if (!ubi->free.rb_node) {
18480195a7bbSHeiko Schocher 		if (ubi->works_count == 0) {
18490195a7bbSHeiko Schocher 			ubi_err(ubi, "no free eraseblocks");
18500195a7bbSHeiko Schocher 			ubi_assert(list_empty(&ubi->works));
18510195a7bbSHeiko Schocher 			spin_unlock(&ubi->wl_lock);
18520195a7bbSHeiko Schocher 			return -ENOSPC;
18530195a7bbSHeiko Schocher 		}
18540195a7bbSHeiko Schocher 
18550195a7bbSHeiko Schocher 		err = produce_free_peb(ubi);
18560195a7bbSHeiko Schocher 		if (err < 0) {
18570195a7bbSHeiko Schocher 			spin_unlock(&ubi->wl_lock);
18580195a7bbSHeiko Schocher 			return err;
18590195a7bbSHeiko Schocher 		}
18600195a7bbSHeiko Schocher 		spin_unlock(&ubi->wl_lock);
18610195a7bbSHeiko Schocher 		up_read(&ubi->fm_eba_sem);
18620195a7bbSHeiko Schocher 		goto retry;
18630195a7bbSHeiko Schocher 
18640195a7bbSHeiko Schocher 	}
18650195a7bbSHeiko Schocher 	e = wl_get_wle(ubi);
18660195a7bbSHeiko Schocher 	prot_queue_add(ubi, e);
18670195a7bbSHeiko Schocher 	spin_unlock(&ubi->wl_lock);
18680195a7bbSHeiko Schocher 
18690195a7bbSHeiko Schocher 	err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
18700195a7bbSHeiko Schocher 				    ubi->peb_size - ubi->vid_hdr_aloffset);
18710195a7bbSHeiko Schocher 	if (err) {
18720195a7bbSHeiko Schocher 		ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
18730195a7bbSHeiko Schocher 		return err;
18740195a7bbSHeiko Schocher 	}
18750195a7bbSHeiko Schocher 
18760195a7bbSHeiko Schocher 	return e->pnum;
18770195a7bbSHeiko Schocher }
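
/*
 * Editorial sketch (not part of the original driver): because
 * ubi_wl_get_peb() returns with @ubi->fm_eba_sem read-locked, the caller
 * is responsible for dropping the semaphore once the result has been
 * handled, roughly:
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	... record the new mapping if pnum >= 0 ...
 *	up_read(&ubi->fm_eba_sem);
 */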
18780195a7bbSHeiko Schocher #else
18790195a7bbSHeiko Schocher #include "fastmap-wl.c"
18800195a7bbSHeiko Schocher #endif
1881