Lines Matching full:ubi

9  * UBI wear-leveling sub-system.
23 * done asynchronously in context of the per-UBI device background thread,
37 * As it was said, for the UBI sub-system all physical eraseblocks are either
92 #include "ubi.h"
125 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
126 static int self_check_in_wl_tree(const struct ubi_device *ubi,
128 static int self_check_in_pq(const struct ubi_device *ubi,
137 * the @ubi->used and @ubi->free RB-trees.
169 * @ubi: UBI device description object
175 static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e) in wl_entry_destroy() argument
177 ubi->lookuptbl[e->pnum] = NULL; in wl_entry_destroy()
183 * @ubi: UBI device description object
188 static int do_work(struct ubi_device *ubi) in do_work() argument
196 * @ubi->work_sem is used to synchronize with the workers. Workers take in do_work()
201 down_read(&ubi->work_sem); in do_work()
202 spin_lock(&ubi->wl_lock); in do_work()
203 if (list_empty(&ubi->works)) { in do_work()
204 spin_unlock(&ubi->wl_lock); in do_work()
205 up_read(&ubi->work_sem); in do_work()
209 wrk = list_entry(ubi->works.next, struct ubi_work, list); in do_work()
211 ubi->works_count -= 1; in do_work()
212 ubi_assert(ubi->works_count >= 0); in do_work()
213 spin_unlock(&ubi->wl_lock); in do_work()
220 err = wrk->func(ubi, wrk, 0); in do_work()
222 ubi_err(ubi, "work failed with error code %d", err); in do_work()
223 up_read(&ubi->work_sem); in do_work()
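The do_work() matches above sketch the dequeue-one-item pattern: take wl_lock, pop the oldest work item off the list, drop the lock, then run the item's callback with only the work semaphore held in read mode. A minimal user-space model of just that dequeue-and-run step (pthread-based, hypothetical names, not the kernel code):

    #include <pthread.h>

    struct work {
        struct work *next;
        int (*func)(struct work *w);    /* callback, run without the lock */
    };

    static struct work *works;          /* pending items, oldest first */
    static int works_count;
    static pthread_mutex_t wl_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Pop one pending work item and run it; 0 when idle or on success. */
    static int do_one_work(void)
    {
        struct work *wrk;

        pthread_mutex_lock(&wl_lock);
        if (!works) {
            pthread_mutex_unlock(&wl_lock);
            return 0;
        }
        wrk = works;                    /* oldest entry, like ubi->works.next */
        works = wrk->next;
        works_count -= 1;
        pthread_mutex_unlock(&wl_lock);

        return wrk->func(wrk);          /* the callback runs unlocked */
    }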
269 * @ubi: UBI device description object
275 static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e) in in_pq() argument
281 list_for_each_entry(p, &ubi->pq[i], u.list) in in_pq()
290 * @ubi: UBI device description object
293 * This function adds @e to the tail of the protection queue @ubi->pq, where
298 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) in prot_queue_add() argument
300 int pq_tail = ubi->pq_head - 1; in prot_queue_add()
305 list_add_tail(&e->u.list, &ubi->pq[pq_tail]); in prot_queue_add()
311 * @ubi: UBI device description object
318 static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi, in find_wl_entry() argument
346 * @ubi: UBI device description object
353 static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi, in find_mean_wl_entry() argument
367 e = may_reserve_for_fm(ubi, e, root); in find_mean_wl_entry()
369 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2); in find_mean_wl_entry()
377 * @ubi: UBI device description object
382 static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi) in wl_get_wle() argument
386 e = find_mean_wl_entry(ubi, &ubi->free); in wl_get_wle()
388 ubi_err(ubi, "no free eraseblocks"); in wl_get_wle()
392 self_check_in_wl_tree(ubi, e, &ubi->free); in wl_get_wle()
398 rb_erase(&e->u.rb, &ubi->free); in wl_get_wle()
399 ubi->free_count--; in wl_get_wle()
407 * @ubi: UBI device description object
413 static int prot_queue_del(struct ubi_device *ubi, int pnum) in prot_queue_del() argument
417 e = ubi->lookuptbl[pnum]; in prot_queue_del()
421 if (self_check_in_pq(ubi, e)) in prot_queue_del()
431 * @ubi: UBI device description object
438 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in sync_erase() argument
447 err = self_check_ec(ubi, e->pnum, e->ec); in sync_erase()
451 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); in sync_erase()
455 err = ubi_io_sync_erase(ubi, e->pnum, torture); in sync_erase()
462 * Erase counter overflow. Upgrade UBI and use 64-bit in sync_erase()
465 ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu", in sync_erase()
475 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); in sync_erase()
480 spin_lock(&ubi->wl_lock); in sync_erase()
481 if (e->ec > ubi->max_ec) in sync_erase()
482 ubi->max_ec = e->ec; in sync_erase()
483 spin_unlock(&ubi->wl_lock); in sync_erase()
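The sync_erase() lines above amount to: erase the block, bump its erase counter, reject counters that would overflow the on-flash field, write the new value back in a fresh EC header, and keep ubi->max_ec current under wl_lock. A rough stand-alone sketch of that bookkeeping (the limit and the names here are illustrative assumptions, not the kernel constants):

    #include <errno.h>

    #define MAX_ERASECOUNTER 0x7FFFFFFF     /* assumed limit, illustrative only */

    struct wl_entry { int pnum; int ec; };

    /* Returns the new erase counter, or a negative errno on overflow. */
    static long long bump_erase_counter(struct wl_entry *e, int *max_ec)
    {
        long long ec = (long long)e->ec + 1;

        if (ec > MAX_ERASECOUNTER)
            return -EINVAL;                 /* counter no longer fits */

        /* ...a real implementation writes ec into a fresh EC header here... */
        e->ec = (int)ec;
        if (e->ec > *max_ec)                /* track the device-wide maximum */
            *max_ec = e->ec;
        return ec;
    }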
492 * @ubi: UBI device description object
498 static void serve_prot_queue(struct ubi_device *ubi) in serve_prot_queue() argument
509 spin_lock(&ubi->wl_lock); in serve_prot_queue()
510 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) { in serve_prot_queue()
515 wl_tree_add(e, &ubi->used); in serve_prot_queue()
521 spin_unlock(&ubi->wl_lock); in serve_prot_queue()
527 ubi->pq_head += 1; in serve_prot_queue()
528 if (ubi->pq_head == UBI_PROT_QUEUE_LEN) in serve_prot_queue()
529 ubi->pq_head = 0; in serve_prot_queue()
530 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN); in serve_prot_queue()
531 spin_unlock(&ubi->wl_lock); in serve_prot_queue()
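Taken together, the prot_queue_add() and serve_prot_queue() matches describe a small circular array of list heads: a freshly written PEB goes into the slot just behind pq_head (the tail, with wraparound), and each serve pass empties the slot at pq_head, returns its entries to the used tree, and advances the head, so an entry stays protected for a fixed number of passes. A simplified single-threaded model (hypothetical names, no locking, the used tree reduced to a list):

    #include <stddef.h>

    #define PROT_QUEUE_LEN 10

    struct pq_entry { int pnum; struct pq_entry *next; };

    static struct pq_entry *pq[PROT_QUEUE_LEN]; /* one bucket per aging step */
    static struct pq_entry *used;               /* stand-in for ubi->used */
    static int pq_head;

    /* Protect a PEB for PROT_QUEUE_LEN serve passes. */
    static void prot_queue_add(struct pq_entry *e)
    {
        int pq_tail = pq_head - 1;

        if (pq_tail < 0)
            pq_tail = PROT_QUEUE_LEN - 1;       /* wrap around */
        e->next = pq[pq_tail];
        pq[pq_tail] = e;
    }

    /* Release the oldest bucket and advance the head. */
    static void serve_prot_queue(void)
    {
        struct pq_entry *e = pq[pq_head];

        while (e) {
            struct pq_entry *next = e->next;

            e->next = used;                     /* back to the "used" set */
            used = e;
            e = next;
        }
        pq[pq_head] = NULL;
        pq_head = (pq_head + 1) % PROT_QUEUE_LEN;
    }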
536 * @ubi: UBI device description object
540 * list. Can only be used if ubi->work_sem is already held in read mode!
542 static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) in __schedule_ubi_work() argument
544 spin_lock(&ubi->wl_lock); in __schedule_ubi_work()
545 list_add_tail(&wrk->list, &ubi->works); in __schedule_ubi_work()
546 ubi_assert(ubi->works_count >= 0); in __schedule_ubi_work()
547 ubi->works_count += 1; in __schedule_ubi_work()
548 if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi)) in __schedule_ubi_work()
549 wake_up_process(ubi->bgt_thread); in __schedule_ubi_work()
550 spin_unlock(&ubi->wl_lock); in __schedule_ubi_work()
555 * @ubi: UBI device description object
561 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) in schedule_ubi_work() argument
563 down_read(&ubi->work_sem); in schedule_ubi_work()
564 __schedule_ubi_work(ubi, wrk); in schedule_ubi_work()
565 up_read(&ubi->work_sem); in schedule_ubi_work()
568 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
573 * @ubi: UBI device description object
583 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in schedule_erase() argument
604 __schedule_ubi_work(ubi, wl_wrk); in schedule_erase()
606 schedule_ubi_work(ubi, wl_wrk); in schedule_erase()
610 static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
613 * @ubi: UBI device description object
620 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in do_sync_erase() argument
632 return __erase_worker(ubi, &wl_wrk); in do_sync_erase()
635 static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
638 * @ubi: UBI device description object
647 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, argument
661 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
667 down_read(&ubi->fm_eba_sem);
668 mutex_lock(&ubi->move_mutex);
669 spin_lock(&ubi->wl_lock);
670 ubi_assert(!ubi->move_from && !ubi->move_to);
671 ubi_assert(!ubi->move_to_put);
674 if (!next_peb_for_wl(ubi, true) ||
676 if (!ubi->free.rb_node ||
678 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
686 * @ubi->used tree later and the wear-leveling will be
690 !ubi->free.rb_node, !ubi->used.rb_node);
695 e1 = find_anchor_wl_entry(&ubi->used);
696 if (e1 && ubi->fm_anchor &&
697 (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
698 ubi->fm_do_produce_anchor = 1;
704 wl_tree_add(ubi->fm_anchor, &ubi->free);
705 ubi->fm_anchor = NULL;
706 ubi->free_count++;
709 if (ubi->fm_do_produce_anchor) {
712 e2 = get_peb_for_wl(ubi);
716 self_check_in_wl_tree(ubi, e1, &ubi->used);
717 rb_erase(&e1->u.rb, &ubi->used);
719 ubi->fm_do_produce_anchor = 0;
720 } else if (!ubi->scrub.rb_node) {
722 if (!ubi->scrub.rb_node) {
729 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
730 e2 = get_peb_for_wl(ubi);
739 wl_tree_add(e2, &ubi->free);
740 ubi->free_count++;
743 self_check_in_wl_tree(ubi, e1, &ubi->used);
744 rb_erase(&e1->u.rb, &ubi->used);
750 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
751 e2 = get_peb_for_wl(ubi);
755 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
756 rb_erase(&e1->u.rb, &ubi->scrub);
760 ubi->move_from = e1;
761 ubi->move_to = e2;
762 spin_unlock(&ubi->wl_lock);
775 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
780 * We are trying to move PEB without a VID header. UBI
802 } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
814 ubi_err(ubi, "error %d while reading VID header from PEB %d",
822 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
840 * be put back into ubi->scrub list.
841 * 2. Non-scrub type PEB will be put back into ubi->used
864 * put this PEB to the @ubi->erroneous list to prevent
865 * UBI from trying to move it over and over again.
867 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
868 ubi_err(ubi, "too many erroneous eraseblocks (%d)",
869 ubi->erroneous_peb_count);
885 ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
889 spin_lock(&ubi->wl_lock);
890 if (!ubi->move_to_put) {
891 wl_tree_add(e2, &ubi->used);
894 ubi->move_from = ubi->move_to = NULL;
895 ubi->move_to_put = ubi->wl_scheduled = 0;
896 spin_unlock(&ubi->wl_lock);
898 err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
901 spin_lock(&ubi->wl_lock);
902 wl_entry_destroy(ubi, e2);
903 spin_unlock(&ubi->wl_lock);
915 err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
921 mutex_unlock(&ubi->move_mutex);
922 up_read(&ubi->fm_eba_sem);
937 spin_lock(&ubi->wl_lock);
939 prot_queue_add(ubi, e1);
941 wl_tree_add(e1, &ubi->erroneous);
942 ubi->erroneous_peb_count += 1;
944 wl_tree_add(e1, &ubi->scrub);
946 wl_tree_add(e1, &ubi->used);
948 wl_tree_add(e2, &ubi->free);
949 ubi->free_count++;
952 ubi_assert(!ubi->move_to_put);
953 ubi->move_from = ubi->move_to = NULL;
954 ubi->wl_scheduled = 0;
955 spin_unlock(&ubi->wl_lock);
959 ensure_wear_leveling(ubi, 1);
961 err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
967 err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
972 mutex_unlock(&ubi->move_mutex);
973 up_read(&ubi->fm_eba_sem);
978 ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
981 ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
983 spin_lock(&ubi->wl_lock);
984 ubi->move_from = ubi->move_to = NULL;
985 ubi->move_to_put = ubi->wl_scheduled = 0;
986 wl_entry_destroy(ubi, e1);
987 wl_entry_destroy(ubi, e2);
988 spin_unlock(&ubi->wl_lock);
993 ubi_ro_mode(ubi);
994 mutex_unlock(&ubi->move_mutex);
995 up_read(&ubi->fm_eba_sem);
1000 ubi->wl_scheduled = 0;
1001 spin_unlock(&ubi->wl_lock);
1002 mutex_unlock(&ubi->move_mutex);
1003 up_read(&ubi->fm_eba_sem);
1010 * @ubi: UBI device description object
1011 * @nested: set to non-zero if this function is called from UBI worker
1017 static int ensure_wear_leveling(struct ubi_device *ubi, int nested) argument
1022 spin_lock(&ubi->wl_lock);
1023 if (ubi->wl_scheduled)
1028 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
1031 if (!ubi->scrub.rb_node) {
1033 if (!need_wear_leveling(ubi))
1039 if (!ubi->used.rb_node || !ubi->free.rb_node)
1049 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1050 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1059 ubi->wl_scheduled = 1;
1060 spin_unlock(&ubi->wl_lock);
1070 __schedule_ubi_work(ubi, wrk);
1072 schedule_ubi_work(ubi, wrk);
1076 spin_lock(&ubi->wl_lock);
1077 ubi->wl_scheduled = 0;
1079 spin_unlock(&ubi->wl_lock);
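From the ensure_wear_leveling() matches above, the trigger condition is roughly: if the scrub tree is non-empty, data has to be moved regardless; otherwise take the least-worn PEB from the used tree and a free PEB chosen at most WL_FREE_MAX_DIFF above the free minimum, and only schedule the wear-leveling worker when the erase-count gap reaches the threshold. A hedged sketch of that predicate (the constant is illustrative; the real threshold is a build-time setting):

    #define WL_THRESHOLD 4096   /* illustrative stand-in for UBI_WL_THRESHOLD */

    /*
     * scrub_pending:      non-zero if the scrub tree is not empty
     * min_used_ec:        erase counter of the least-worn used PEB
     * candidate_free_ec:  erase counter of the chosen free PEB
     */
    static int wear_leveling_needed(int scrub_pending, int min_used_ec,
                                    int candidate_free_ec)
    {
        if (scrub_pending)
            return 1;           /* scrubbing always justifies a move */
        return candidate_free_ec - min_used_ec >= WL_THRESHOLD;
    }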
1085 * @ubi: UBI device description object
1093 static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) argument
1104 err = sync_erase(ubi, e, wl_wrk->torture);
1106 spin_lock(&ubi->wl_lock);
1108 if (!ubi->fm_disabled && !ubi->fm_anchor &&
1114 ubi->fm_anchor = e;
1115 ubi->fm_do_produce_anchor = 0;
1117 wl_tree_add(e, &ubi->free);
1118 ubi->free_count++;
1121 spin_unlock(&ubi->wl_lock);
1127 serve_prot_queue(ubi);
1130 err = ensure_wear_leveling(ubi, 1);
1134 ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
1141 err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
1143 spin_lock(&ubi->wl_lock);
1144 wl_entry_destroy(ubi, e);
1145 spin_unlock(&ubi->wl_lock);
1152 spin_lock(&ubi->wl_lock);
1153 wl_entry_destroy(ubi, e);
1154 spin_unlock(&ubi->wl_lock);
1165 if (!ubi->bad_allowed) {
1166 ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
1170 spin_lock(&ubi->volumes_lock);
1171 if (ubi->beb_rsvd_pebs == 0) {
1172 if (ubi->avail_pebs == 0) {
1173 spin_unlock(&ubi->volumes_lock);
1174 ubi_err(ubi, "no reserved/available physical eraseblocks");
1177 ubi->avail_pebs -= 1;
1180 spin_unlock(&ubi->volumes_lock);
1182 ubi_msg(ubi, "mark PEB %d as bad", pnum);
1183 err = ubi_io_mark_bad(ubi, pnum);
1187 spin_lock(&ubi->volumes_lock);
1188 if (ubi->beb_rsvd_pebs > 0) {
1194 ubi->avail_pebs += 1;
1197 ubi->beb_rsvd_pebs -= 1;
1199 ubi->bad_peb_count += 1;
1200 ubi->good_peb_count -= 1;
1201 ubi_calculate_reserved(ubi);
1203 ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
1204 else if (ubi->beb_rsvd_pebs)
1205 ubi_msg(ubi, "%d PEBs left in the reserve",
1206 ubi->beb_rsvd_pebs);
1208 ubi_warn(ubi, "last PEB from the reserve was used");
1209 spin_unlock(&ubi->volumes_lock);
1215 spin_lock(&ubi->volumes_lock);
1216 ubi->avail_pebs += 1;
1217 spin_unlock(&ubi->volumes_lock);
1219 ubi_ro_mode(ubi);
1223 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, argument
1233 wl_entry_destroy(ubi, e);
1237 ret = __erase_worker(ubi, wl_wrk);
1244 * @ubi: UBI device description object
1255 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, argument
1263 ubi_assert(pnum < ubi->peb_count);
1265 down_read(&ubi->fm_protect);
1268 spin_lock(&ubi->wl_lock);
1269 e = ubi->lookuptbl[pnum];
1275 * ubi_wl_put_peb) will set ubi ro_mode at the same time,
1278 spin_unlock(&ubi->wl_lock);
1279 up_read(&ubi->fm_protect);
1282 if (e == ubi->move_from) {
1289 spin_unlock(&ubi->wl_lock);
1291 /* Wait for the WL worker by taking the @ubi->move_mutex */
1292 mutex_lock(&ubi->move_mutex);
1293 mutex_unlock(&ubi->move_mutex);
1295 } else if (e == ubi->move_to) {
1306 ubi_assert(!ubi->move_to_put);
1307 ubi->move_to_put = 1;
1308 spin_unlock(&ubi->wl_lock);
1309 up_read(&ubi->fm_protect);
1312 if (in_wl_tree(e, &ubi->used)) {
1313 self_check_in_wl_tree(ubi, e, &ubi->used);
1314 rb_erase(&e->u.rb, &ubi->used);
1315 } else if (in_wl_tree(e, &ubi->scrub)) {
1316 self_check_in_wl_tree(ubi, e, &ubi->scrub);
1317 rb_erase(&e->u.rb, &ubi->scrub);
1318 } else if (in_wl_tree(e, &ubi->erroneous)) {
1319 self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1320 rb_erase(&e->u.rb, &ubi->erroneous);
1321 ubi->erroneous_peb_count -= 1;
1322 ubi_assert(ubi->erroneous_peb_count >= 0);
1326 err = prot_queue_del(ubi, e->pnum);
1328 ubi_err(ubi, "PEB %d not found", pnum);
1329 ubi_ro_mode(ubi);
1330 spin_unlock(&ubi->wl_lock);
1331 up_read(&ubi->fm_protect);
1336 spin_unlock(&ubi->wl_lock);
1338 err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
1340 spin_lock(&ubi->wl_lock);
1341 wl_tree_add(e, &ubi->used);
1342 spin_unlock(&ubi->wl_lock);
1345 up_read(&ubi->fm_protect);
1351 * @ubi: UBI device description object
1359 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) argument
1363 ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
1366 spin_lock(&ubi->wl_lock);
1367 e = ubi->lookuptbl[pnum];
1368 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1369 in_wl_tree(e, &ubi->erroneous)) {
1370 spin_unlock(&ubi->wl_lock);
1374 if (e == ubi->move_to) {
1381 spin_unlock(&ubi->wl_lock);
1387 if (in_wl_tree(e, &ubi->used)) {
1388 self_check_in_wl_tree(ubi, e, &ubi->used);
1389 rb_erase(&e->u.rb, &ubi->used);
1393 err = prot_queue_del(ubi, e->pnum);
1395 ubi_err(ubi, "PEB %d not found", pnum);
1396 ubi_ro_mode(ubi);
1397 spin_unlock(&ubi->wl_lock);
1402 wl_tree_add(e, &ubi->scrub);
1403 spin_unlock(&ubi->wl_lock);
1409 return ensure_wear_leveling(ubi, 0);
1414 * @ubi: UBI device description object
1424 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) argument
1434 vol_id, lnum, ubi->works_count);
1440 down_read(&ubi->work_sem);
1441 spin_lock(&ubi->wl_lock);
1442 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1446 ubi->works_count -= 1;
1447 ubi_assert(ubi->works_count >= 0);
1448 spin_unlock(&ubi->wl_lock);
1450 err = wrk->func(ubi, wrk, 0);
1452 up_read(&ubi->work_sem);
1456 spin_lock(&ubi->wl_lock);
1461 spin_unlock(&ubi->wl_lock);
1462 up_read(&ubi->work_sem);
1469 down_write(&ubi->work_sem);
1470 up_write(&ubi->work_sem);
1475 static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e) argument
1477 if (in_wl_tree(e, &ubi->scrub))
1479 else if (in_wl_tree(e, &ubi->erroneous))
1481 else if (ubi->move_from == e)
1483 else if (ubi->move_to == e)
1491 * @ubi: UBI device description object
1502 * %ENOENT, PEB is no longer used by UBI
1508 int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force) argument
1513 if (pnum < 0 || pnum >= ubi->peb_count) {
1522 down_write(&ubi->work_sem);
1528 spin_lock(&ubi->wl_lock);
1529 e = ubi->lookuptbl[pnum];
1531 spin_unlock(&ubi->wl_lock);
1539 if (!scrub_possible(ubi, e)) {
1540 spin_unlock(&ubi->wl_lock);
1544 spin_unlock(&ubi->wl_lock);
1547 mutex_lock(&ubi->buf_mutex);
1548 err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
1549 mutex_unlock(&ubi->buf_mutex);
1556 spin_lock(&ubi->wl_lock);
1559 * Recheck. We released wl_lock, UBI might have killed the
1562 e = ubi->lookuptbl[pnum];
1564 spin_unlock(&ubi->wl_lock);
1572 if (!scrub_possible(ubi, e)) {
1573 spin_unlock(&ubi->wl_lock);
1578 if (in_pq(ubi, e)) {
1579 prot_queue_del(ubi, e->pnum);
1580 wl_tree_add(e, &ubi->scrub);
1581 spin_unlock(&ubi->wl_lock);
1583 err = ensure_wear_leveling(ubi, 1);
1584 } else if (in_wl_tree(e, &ubi->used)) {
1585 rb_erase(&e->u.rb, &ubi->used);
1586 wl_tree_add(e, &ubi->scrub);
1587 spin_unlock(&ubi->wl_lock);
1589 err = ensure_wear_leveling(ubi, 1);
1590 } else if (in_wl_tree(e, &ubi->free)) {
1591 rb_erase(&e->u.rb, &ubi->free);
1592 ubi->free_count--;
1593 spin_unlock(&ubi->wl_lock);
1599 err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
1602 spin_unlock(&ubi->wl_lock);
1613 up_write(&ubi->work_sem);
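The ubi_bitflip_check() matches above follow a check / drop-the-lock / recheck shape: look the PEB up and confirm it is still scrubbable, release wl_lock for the slow full-PEB read, then look it up and test again because the entry may have disappeared in the meantime, and only then move it to the scrub tree or schedule an erase. A bare-bones model of that re-validation pattern (pthread-based, toy state instead of the real lookup table):

    #include <pthread.h>
    #include <errno.h>

    /* Toy state: one PEB that is either present or gone, clean or flipped. */
    static int peb_present = 1;
    static int peb_needs_scrub;
    static pthread_mutex_t wl_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for the full-PEB read; pretend >0 means correctable bitflips. */
    static int read_whole_peb(int pnum) { (void)pnum; return 1; }

    static int bitflip_check(int pnum)
    {
        int err;

        pthread_mutex_lock(&wl_lock);
        if (!peb_present) {                 /* first check, under the lock */
            pthread_mutex_unlock(&wl_lock);
            return -ENOENT;
        }
        pthread_mutex_unlock(&wl_lock);     /* drop the lock for slow I/O */

        err = read_whole_peb(pnum);

        pthread_mutex_lock(&wl_lock);
        if (!peb_present) {                 /* recheck: state may have changed */
            pthread_mutex_unlock(&wl_lock);
            return -ENOENT;
        }
        if (err > 0)
            peb_needs_scrub = 1;            /* hand the PEB over for scrubbing */
        pthread_mutex_unlock(&wl_lock);
        return 0;
    }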
1621 * @ubi: UBI device description object
1624 static void tree_destroy(struct ubi_device *ubi, struct rb_root *root) argument
1646 wl_entry_destroy(ubi, e);
1652 * ubi_thread - UBI background thread.
1653 * @u: the UBI device description object pointer
1658 struct ubi_device *ubi = u; local
1660 ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1661 ubi->bgt_name, task_pid_nr(current));
1673 spin_lock(&ubi->wl_lock);
1674 if (list_empty(&ubi->works) || ubi->ro_mode ||
1675 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1677 spin_unlock(&ubi->wl_lock);
1694 spin_unlock(&ubi->wl_lock);
1696 err = do_work(ubi);
1698 ubi_err(ubi, "%s: work failed with error code %d",
1699 ubi->bgt_name, err);
1705 ubi_msg(ubi, "%s: %d consecutive failures",
1706 ubi->bgt_name, WL_MAX_FAILURES);
1707 ubi_ro_mode(ubi);
1708 ubi->thread_enabled = 0;
1717 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1718 ubi->thread_enabled = 0;
1724 * @ubi: UBI device description object
1726 static void shutdown_work(struct ubi_device *ubi) argument
1728 while (!list_empty(&ubi->works)) {
1731 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1733 wrk->func(ubi, wrk, 1);
1734 ubi->works_count -= 1;
1735 ubi_assert(ubi->works_count >= 0);
1740 * erase_aeb - erase a PEB given in UBI attach info PEB
1741 * @ubi: UBI device description object
1742 * @aeb: UBI attach info PEB
1745 static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync) argument
1756 ubi->lookuptbl[e->pnum] = e;
1759 err = sync_erase(ubi, e, false);
1763 wl_tree_add(e, &ubi->free);
1764 ubi->free_count++;
1766 err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
1774 wl_entry_destroy(ubi, e);
1781 * @ubi: UBI device description object
1787 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) argument
1795 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1796 spin_lock_init(&ubi->wl_lock);
1797 mutex_init(&ubi->move_mutex);
1798 init_rwsem(&ubi->work_sem);
1799 ubi->max_ec = ai->max_ec;
1800 INIT_LIST_HEAD(&ubi->works);
1802 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1805 ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
1806 if (!ubi->lookuptbl)
1810 INIT_LIST_HEAD(&ubi->pq[i]);
1811 ubi->pq_head = 0;
1813 ubi->free_count = 0;
1817 err = erase_aeb(ubi, aeb, false);
1837 wl_tree_add(e, &ubi->free);
1838 ubi->free_count++;
1840 ubi->lookuptbl[e->pnum] = e;
1857 ubi->lookuptbl[e->pnum] = e;
1862 wl_tree_add(e, &ubi->used);
1866 wl_tree_add(e, &ubi->scrub);
1876 e = ubi_find_fm_block(ubi, aeb->pnum);
1879 ubi_assert(!ubi->lookuptbl[e->pnum]);
1880 ubi->lookuptbl[e->pnum] = e;
1890 if (ubi->lookuptbl[aeb->pnum])
1905 err = erase_aeb(ubi, aeb, sync);
1915 ubi_assert(ubi->good_peb_count == found_pebs);
1918 ubi_fastmap_init(ubi, &reserved_pebs);
1920 if (ubi->avail_pebs < reserved_pebs) {
1921 ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
1922 ubi->avail_pebs, reserved_pebs);
1923 if (ubi->corr_peb_count)
1924 ubi_err(ubi, "%d PEBs are corrupted and not used",
1925 ubi->corr_peb_count);
1929 ubi->avail_pebs -= reserved_pebs;
1930 ubi->rsvd_pebs += reserved_pebs;
1933 err = ensure_wear_leveling(ubi, 0);
1938 if (!ubi->ro_mode && !ubi->fm_disabled)
1939 ubi_ensure_anchor_pebs(ubi);
1944 shutdown_work(ubi);
1945 tree_destroy(ubi, &ubi->used);
1946 tree_destroy(ubi, &ubi->free);
1947 tree_destroy(ubi, &ubi->scrub);
1948 kfree(ubi->lookuptbl);
1954 * @ubi: UBI device description object
1956 static void protection_queue_destroy(struct ubi_device *ubi) argument
1962 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1964 wl_entry_destroy(ubi, e);
1971 * @ubi: UBI device description object
1973 void ubi_wl_close(struct ubi_device *ubi) argument
1976 ubi_fastmap_close(ubi);
1977 shutdown_work(ubi);
1978 protection_queue_destroy(ubi);
1979 tree_destroy(ubi, &ubi->used);
1980 tree_destroy(ubi, &ubi->erroneous);
1981 tree_destroy(ubi, &ubi->free);
1982 tree_destroy(ubi, &ubi->scrub);
1983 kfree(ubi->lookuptbl);
1988 * @ubi: UBI device description object
1996 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec) argument
2002 if (!ubi_dbg_chk_gen(ubi))
2005 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
2009 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
2018 ubi_err(ubi, "self-check failed for PEB %d", pnum);
2019 ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
2032 * @ubi: UBI device description object
2039 static int self_check_in_wl_tree(const struct ubi_device *ubi, argument
2042 if (!ubi_dbg_chk_gen(ubi))
2048 ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
2057 * @ubi: UBI device description object
2060 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
2062 static int self_check_in_pq(const struct ubi_device *ubi, argument
2065 if (!ubi_dbg_chk_gen(ubi))
2068 if (in_pq(ubi, e))
2071 ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
2077 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) argument
2081 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
2082 self_check_in_wl_tree(ubi, e, &ubi->free);
2083 ubi->free_count--;
2084 ubi_assert(ubi->free_count >= 0);
2085 rb_erase(&e->u.rb, &ubi->free);
2092 * @ubi: UBI device description object
2099 static int produce_free_peb(struct ubi_device *ubi) argument
2103 while (!ubi->free.rb_node && ubi->works_count) {
2104 spin_unlock(&ubi->wl_lock);
2107 err = do_work(ubi);
2109 spin_lock(&ubi->wl_lock);
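produce_free_peb(), whose matches appear above, just drains pending work until a free PEB shows up: while the free tree is empty but work items are queued, it drops wl_lock, runs one work item (typically an erase that refills the free tree), and retakes the lock. A compact model of that loop (hypothetical names; the stub stands in for the real work processing, and the caller is assumed to hold the lock on entry):

    #include <pthread.h>

    static int free_count;      /* PEBs currently in the free tree */
    static int works_count;     /* queued work items */
    static pthread_mutex_t wl_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stub: run one queued work item; a completed erase adds a free PEB. */
    static int run_one_work(void)
    {
        pthread_mutex_lock(&wl_lock);
        if (works_count > 0) {
            works_count -= 1;
            free_count += 1;
        }
        pthread_mutex_unlock(&wl_lock);
        return 0;
    }

    /* Called with wl_lock held; returns with it held again. */
    static int produce_free_peb(void)
    {
        int err;

        while (!free_count && works_count) {
            pthread_mutex_unlock(&wl_lock); /* let the work make progress */
            err = run_one_work();
            pthread_mutex_lock(&wl_lock);
            if (err)
                return err;
        }
        return 0;                           /* caller rechecks the free tree */
    }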
2119 * @ubi: UBI device description object
2123 * Returns with ubi->fm_eba_sem held in read mode!
2125 int ubi_wl_get_peb(struct ubi_device *ubi) argument
2131 down_read(&ubi->fm_eba_sem);
2132 spin_lock(&ubi->wl_lock);
2133 if (!ubi->free.rb_node) {
2134 if (ubi->works_count == 0) {
2135 ubi_err(ubi, "no free eraseblocks");
2136 ubi_assert(list_empty(&ubi->works));
2137 spin_unlock(&ubi->wl_lock);
2141 err = produce_free_peb(ubi);
2143 spin_unlock(&ubi->wl_lock);
2146 spin_unlock(&ubi->wl_lock);
2147 up_read(&ubi->fm_eba_sem);
2151 e = wl_get_wle(ubi);
2152 prot_queue_add(ubi, e);
2153 spin_unlock(&ubi->wl_lock);
2155 err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
2156 ubi->peb_size - ubi->vid_hdr_aloffset);
2158 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);