xref: /openbmc/linux/drivers/mtd/ubi/fastmap-wl.c (revision 76f9476e)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
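	/*
	 * Clear the flag under wl_lock so that get_peb_for_wl() may schedule
	 * a new fastmap update once this one has completed.
	 */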
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree where to look for
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

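	/*
	 * The anchor PEB must lie within the first UBI_FM_MAX_START PEBs so
	 * that the attach code can find the fastmap with a limited scan;
	 * among those candidates, prefer the one with the lowest erase
	 * counter.
	 */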
	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

static inline void return_unused_peb(struct ubi_device *ubi,
				     struct ubi_wl_entry *e)
{
	wl_tree_add(e, &ubi->free);
	ubi->free_count++;
}

/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		return_unused_peb(ubi, e);
	}
}

/**
 * ubi_wl_get_fm_peb - find a free physical eraseblock for fastmap.
 * @ubi: UBI device description object
 * @anchor: if non-zero, the returned PEB will be used as anchor PEB by fastmap
 *
 * The function returns a free physical eraseblock (an anchor candidate below
 * UBI_FM_MAX_START if @anchor is set, otherwise an entry picked by
 * find_mean_wl_entry()) and removes it from the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/* Remove it from the free list; the wl subsystem no longer knows
	 * about this eraseblock. */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}

/*
 * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
 * @ubi: UBI device description object
 * @is_wl_pool: whether UBI is filling wear leveling pool
 *
 * This helper function checks whether there are enough free pebs (after
 * deducting the pebs taken by the fastmap itself) to fill fm_pool and
 * fm_wl_pool. The fastmap pebs are only deducted once at least one free peb
 * has been filled into fm_wl_pool. For the wear leveling pool, UBI also
 * keeps the pebs reserved for bad peb handling out of the count, because
 * newly produced bad pebs could otherwise leave too few free pebs for user
 * volumes.
 */
static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
{
	int fm_used = 0;	/* fastmap non-anchor pebs */
	int beb_rsvd_pebs;

	if (!ubi->free.rb_node)
		return false;

	beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
	if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
		fm_used = ubi->fm_size / ubi->leb_size - 1;
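	/*
	 * fm_used is the number of non-anchor PEBs occupied by the fastmap
	 * itself. As a hypothetical example, with fm_size == 3 * leb_size the
	 * fastmap spans 3 PEBs, one of which is the anchor, so fm_used == 2
	 * and two extra free PEBs must remain available.
	 */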

	return ubi->free_count - beb_rsvd_pebs > fm_used;
}

/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

	if (ubi->fm_anchor) {
		wl_tree_add(ubi->fm_anchor, &ubi->free);
		ubi->free_count++;
		ubi->fm_anchor = NULL;
	}

	if (!ubi->fm_disabled)
		/*
		 * All available PEBs are in ubi->free, now is the time to get
		 * the best anchor PEBs.
		 */
		ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);

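	/*
	 * Fill both pools in lockstep, taking at most one PEB per pool per
	 * iteration, until both pools are full or the free PEB budget
	 * (checked by has_enough_free_count()) is exhausted.
	 */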
	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!has_enough_free_count(ubi, false))
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!has_enough_free_count(ubi, true))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, attempts = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/* We check here also for the WL pool because at this point we can
	 * refill the WL pool synchronously. */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

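	/*
	 * If the user pool is still exhausted after rewriting the fastmap,
	 * fall back to executing pending work synchronously to free up PEBs
	 * and retry; give up with -ENOSPC after a bounded number of attempts.
	 */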
	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		attempts++;
		if (attempts == 10) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}

/**
 * next_peb_for_wl - returns next PEB to be used internally by the
 * WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	if (pool->used == pool->size)
		return NULL;

	pnum = pool->pebs[pool->used];
	return ubi->lookuptbl[pnum];
}

/**
 * need_wear_leveling - checks whether to trigger a wear leveling work.
 * UBI fetches free PEBs from 'wl_pool', but we check free PEBs from both
 * 'wl_pool' and 'ubi->free', because a free PEB in the 'ubi->free' tree may
 * be moved into 'wl_pool' by ubi_refill_pools().
 *
 * @ubi: UBI device description object
 */
static bool need_wear_leveling(struct ubi_device *ubi)
{
	int ec;
	struct ubi_wl_entry *e;

	if (!ubi->used.rb_node)
		return false;

	e = next_peb_for_wl(ubi);
	if (!e) {
		if (!ubi->free.rb_node)
			return false;
		e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
		ec = e->ec;
	} else {
		ec = e->ec;
		if (ubi->free.rb_node) {
			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			ec = max(ec, e->ec);
		}
	}
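	/*
	 * Compare the erase counter of the free candidate picked above with
	 * the least-worn used PEB; the 'used' tree is sorted by erase
	 * counter, so rb_first() returns it. Wear leveling is needed once
	 * the gap reaches UBI_WL_THRESHOLD.
	 */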
	e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);

	return ec - e->ec >= UBI_WL_THRESHOLD;
}

/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	if (pool->used == pool->size) {
		/* We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible. */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;
	struct ubi_wl_entry *anchor;

	spin_lock(&ubi->wl_lock);

	/* Do we already have an anchor? */
	if (ubi->fm_anchor) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	/* See if we can find an anchor PEB on the list of free PEBs */
	anchor = ubi_wl_get_fm_peb(ubi, 1);
	if (anchor) {
		ubi->fm_anchor = anchor;
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	ubi->fm_do_produce_anchor = 1;
	/* No luck, trigger wear leveling to produce a new anchor PEB. */
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/* This can happen if we recovered from a fastmap the very
	 * first time and are now writing a new one. In this case the wl
	 * system has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

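	/*
	 * A fastmap PEB returned with lnum 0 held the fastmap superblock;
	 * any other lnum means it held fastmap data. Account the erasure to
	 * the matching internal volume ID.
	 */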
	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}

static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm_anchor) {
		return_unused_peb(ubi, ubi->fm_anchor);
		ubi->fm_anchor = NULL;
	}

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock candidate
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
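	/*
	 * If fastmap is enabled but not yet written and the candidate lies
	 * below UBI_FM_MAX_START (where a future anchor PEB may have to
	 * live), skip it and hand out the next entry of the free tree
	 * instead, keeping the low-numbered PEB available for fastmap.
	 */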
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}