// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree to search
 *
 * Returns the entry with the lowest erase counter whose PEB number is below
 * UBI_FM_MAX_START, or %NULL if no such entry exists.
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

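	/*
	 * Walk the tree and remember the lowest-erase-count entry that sits
	 * low enough (pnum < UBI_FM_MAX_START) to serve as anchor PEB.
	 */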
	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
	}
}

/**
 * ubi_wl_get_fm_peb - find a free physical eraseblock for fastmap.
 * @ubi: UBI device description object
 * @anchor: if non-zero, the PEB will be used as anchor PEB by fastmap
 *
 * The function returns a free physical eraseblock (an anchor candidate if
 * @anchor is set, otherwise a medium worn-out one) and removes it from the
 * wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

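	/* Bail out if there are no free PEBs left beyond the bad PEB reserve. */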
	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Remove it from the free list; the wl subsystem no longer knows
	 * this erase block.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}

/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

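	/*
	 * Fill both pools in lock step until each one is either full or no
	 * suitable free PEBs are left for it.
	 */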
	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!ubi->free.rb_node ||
			   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns the number of a physical eraseblock in case of
 * success and a negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, attempts = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/*
	 * We also check the WL pool here because at this point we can
	 * refill the WL pool synchronously.
	 */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

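	/*
	 * If the user pool is still empty after writing a new fastmap, try to
	 * produce free PEBs by doing pending work synchronously and retry a
	 * limited number of times before giving up with -ENOSPC.
	 */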
	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		attempts++;
		if (attempts == 10) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}

/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	if (pool->used == pool->size) {
		/* We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible. */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;
	struct ubi_wl_entry *anchor;

	spin_lock(&ubi->wl_lock);

	/* Do we already have an anchor? */
	if (ubi->fm_anchor) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	/* See if we can find an anchor PEB on the list of free PEBs */
	anchor = ubi_wl_get_fm_peb(ubi, 1);
	if (anchor) {
		ubi->fm_anchor = anchor;
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	/* No luck, trigger wear leveling to produce a new anchor PEB */
	ubi->fm_do_produce_anchor = 1;
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/* This can happen if we recovered from a fastmap the very
	 * first time and are now writing a new one. In this case the wl
	 * system has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

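	/* lnum 0 held the fastmap super block, any other lnum fastmap data */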
	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}

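/**
 * ubi_fastmap_close - free all fastmap resources of a UBI device.
 * @ubi: UBI device description object
 *
 * Returns the PEBs still sitting in the fastmap pools to the free tree and
 * frees the in-memory fastmap description, if any.
 */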
static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock candidate to return
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
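	/*
	 * A PEB below UBI_FM_MAX_START could serve as fastmap anchor. While
	 * fastmap is enabled but no fastmap has been attached yet, hand out
	 * the next entry in the tree instead so the low-numbered PEB stays
	 * available.
	 */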
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}
389