Lines matching "ubi" in drivers/mtd/ubi/wl.c
9 * UBI wear-leveling sub-system.
23 * done asynchronously in context of the per-UBI device background thread,
37 * As noted above, for the UBI sub-system all physical eraseblocks are either
97 #include "ubi.h"
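For orientation, the wear-leveling entry that all of the matches below revolve around is defined in ubi.h roughly as follows (reproduced from memory of the mainline header, so treat the exact layout as an assumption):

	struct ubi_wl_entry {
		union {
			struct rb_node rb;	/* on the used/free/scrub/erroneous trees */
			struct list_head list;	/* on the protection queue @ubi->pq */
		} u;
		int ec;		/* erase counter of this PEB */
		int pnum;	/* physical eraseblock number */
	};

The union is why tree code below uses e->u.rb while queue code uses e->u.list: an entry sits on exactly one structure at a time.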
130 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
131 static int self_check_in_wl_tree(const struct ubi_device *ubi,
133 static int self_check_in_pq(const struct ubi_device *ubi,
142 * the @ubi->used and @ubi->free RB-trees.
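The comment at line 142 belongs to wl_tree_add(); its body contains no literal "ubi" and is therefore absent from the matches. A reconstruction from the mainline source (the tree this listing was taken from may differ in detail) shows the ordering invariant of @ubi->used and @ubi->free: erase counter first, PEB number as tie-breaker:

	static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
	{
		struct rb_node **p = &root->rb_node, *parent = NULL;

		while (*p) {
			struct ubi_wl_entry *e1;

			parent = *p;
			e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

			if (e->ec < e1->ec)
				p = &parent->rb_left;
			else if (e->ec > e1->ec)
				p = &parent->rb_right;
			else {
				ubi_assert(e->pnum != e1->pnum);
				if (e->pnum < e1->pnum)
					p = &parent->rb_left;
				else
					p = &parent->rb_right;
			}
		}

		rb_link_node(&e->u.rb, parent, p);
		rb_insert_color(&e->u.rb, root);
	}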
174 * @ubi: UBI device description object
180 static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e) in wl_entry_destroy() argument
182 ubi->lookuptbl[e->pnum] = NULL; in wl_entry_destroy()
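Only one line of wl_entry_destroy() matches; the full body (reconstructed) also returns the entry to the slab cache:

	static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
	{
		ubi->lookuptbl[e->pnum] = NULL;
		kmem_cache_free(ubi_wl_entry_slab, e);
	}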
188 * @ubi: UBI device description object
193 static int do_work(struct ubi_device *ubi) in do_work() argument
201 * @ubi->work_sem is used to synchronize with the workers. Workers take in do_work()
206 down_read(&ubi->work_sem); in do_work()
207 spin_lock(&ubi->wl_lock); in do_work()
208 if (list_empty(&ubi->works)) { in do_work()
209 spin_unlock(&ubi->wl_lock); in do_work()
210 up_read(&ubi->work_sem); in do_work()
214 wrk = list_entry(ubi->works.next, struct ubi_work, list); in do_work()
216 ubi->works_count -= 1; in do_work()
217 ubi_assert(ubi->works_count >= 0); in do_work()
218 spin_unlock(&ubi->wl_lock); in do_work()
225 err = wrk->func(ubi, wrk, 0); in do_work()
227 ubi_err(ubi, "work failed with error code %d", err); in do_work()
228 up_read(&ubi->work_sem); in do_work()
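Two details of do_work() fall outside the matches: the work item is unlinked with list_del() before wl_lock is dropped, and wrk->func() frees the item, which is why @ubi->work_sem is released only after the call. The elided middle, reconstructed:

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);		/* no "ubi" on this line, hence elided */
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function; it frees the work item, so @wrk must not
	 * be touched afterwards.  Holding work_sem across the call is what
	 * lets ubi_wl_flush() use down_write() as an "all workers done" barrier.
	 */
	err = wrk->func(ubi, wrk, 0);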
274 * @ubi: UBI device description object
277 * This function adds @e to the tail of the protection queue @ubi->pq, where
282 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) in prot_queue_add() argument
284 int pq_tail = ubi->pq_head - 1; in prot_queue_add()
289 list_add_tail(&e->u.list, &ubi->pq[pq_tail]); in prot_queue_add()
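The wrap-around between the two matched lines of prot_queue_add() is elided; the whole function is tiny (reconstructed):

	static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
	{
		int pq_tail = ubi->pq_head - 1;

		if (pq_tail < 0)
			pq_tail = UBI_PROT_QUEUE_LEN - 1;
		list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
		dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
	}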
295 * @ubi: UBI device description object
302 static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi, in find_wl_entry() argument
329 if (prev_e && !ubi->fm_disabled && in find_wl_entry()
330 !ubi->fm && e->pnum < UBI_FM_MAX_START) in find_wl_entry()
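The matched fastmap check is only the tail of find_wl_entry(); the elided descent walks the RB-tree for the entry with the highest erase counter still below lowest-EC + @diff. A sketch, reconstructed from the mainline source:

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max = e->ec + diff;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			prev_e = e;	/* second-best, may be handed out instead */
			e = e1;
		}
	}

The matched lines then prefer @prev_e when @e could still serve as a fastmap anchor PEB (pnum < UBI_FM_MAX_START) and no fastmap exists yet.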
338 * @ubi: UBI device description object
345 static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi, in find_mean_wl_entry() argument
359 e = may_reserve_for_fm(ubi, e, root); in find_mean_wl_entry()
361 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2); in find_mean_wl_entry()
369 * @ubi: UBI device description object
374 static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi) in wl_get_wle() argument
378 e = find_mean_wl_entry(ubi, &ubi->free); in wl_get_wle()
380 ubi_err(ubi, "no free eraseblocks"); in wl_get_wle()
384 self_check_in_wl_tree(ubi, e, &ubi->free); in wl_get_wle()
390 rb_erase(&e->u.rb, &ubi->free); in wl_get_wle()
391 ubi->free_count--; in wl_get_wle()
399 * @ubi: UBI device description object
405 static int prot_queue_del(struct ubi_device *ubi, int pnum) in prot_queue_del() argument
409 e = ubi->lookuptbl[pnum]; in prot_queue_del()
413 if (self_check_in_pq(ubi, e)) in prot_queue_del()
423 * @ubi: UBI device description object
430 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in sync_erase() argument
439 err = self_check_ec(ubi, e->pnum, e->ec); in sync_erase()
443 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); in sync_erase()
447 err = ubi_io_sync_erase(ubi, e->pnum, torture); in sync_erase()
454 * Erase counter overflow. Upgrade UBI and use 64-bit in sync_erase()
457 ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu", in sync_erase()
467 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); in sync_erase()
472 spin_lock(&ubi->wl_lock); in sync_erase()
473 if (e->ec > ubi->max_ec) in sync_erase()
474 ubi->max_ec = e->ec; in sync_erase()
475 spin_unlock(&ubi->wl_lock); in sync_erase()
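Between the matches at lines 447 and 467, sync_erase() bumps the counter and guards against overflow; ubi_io_sync_erase() returns the number of erase operations performed (more than one when torturing), so the increment is not always 1. Reconstructed:

	ec += err;			/* err = erase operations performed */
	if (ec > UBI_MAX_ERASECOUNTER) {
		err = -EINVAL;		/* the matched ubi_err() at line 457 fires */
		goto out_free;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);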
484 * @ubi: UBI device description object
490 static void serve_prot_queue(struct ubi_device *ubi) in serve_prot_queue() argument
501 spin_lock(&ubi->wl_lock); in serve_prot_queue()
502 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) { in serve_prot_queue()
507 wl_tree_add(e, &ubi->used); in serve_prot_queue()
513 spin_unlock(&ubi->wl_lock); in serve_prot_queue()
519 ubi->pq_head += 1; in serve_prot_queue()
520 if (ubi->pq_head == UBI_PROT_QUEUE_LEN) in serve_prot_queue()
521 ubi->pq_head = 0; in serve_prot_queue()
522 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN); in serve_prot_queue()
523 spin_unlock(&ubi->wl_lock); in serve_prot_queue()
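The effect of the rotating head is that an entry added by prot_queue_add() (at head - 1) survives roughly UBI_PROT_QUEUE_LEN calls of serve_prot_queue() before returning to @ubi->used. A minimal user-space model of this bucket rotation (hypothetical demo code, not from the kernel):

	#include <stdio.h>

	#define QUEUE_LEN 10	/* stands in for UBI_PROT_QUEUE_LEN */

	int main(void)
	{
		int pq[QUEUE_LEN] = {0};	/* entries per bucket */
		int pq_head = 0;

		/* protect one entry: it goes into the tail bucket, head - 1 */
		int tail = pq_head - 1;
		if (tail < 0)
			tail = QUEUE_LEN - 1;
		pq[tail]++;

		/* each "tick" serves the head bucket and advances the head */
		for (int tick = 1; tick <= QUEUE_LEN; tick++) {
			if (pq[pq_head])
				printf("tick %d: entry released\n", tick);
			pq[pq_head] = 0;
			if (++pq_head == QUEUE_LEN)
				pq_head = 0;
		}
		return 0;	/* prints "tick 10: entry released" */
	}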
527 void ubi_do_worker(struct ubi_device *ubi) in ubi_do_worker() argument
531 if (list_empty(&ubi->works) || ubi->ro_mode || in ubi_do_worker()
532 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) in ubi_do_worker()
535 spin_lock(&ubi->wl_lock); in ubi_do_worker()
536 while (!list_empty(&ubi->works)) { in ubi_do_worker()
541 spin_unlock(&ubi->wl_lock); in ubi_do_worker()
542 err = do_work(ubi); in ubi_do_worker()
543 spin_lock(&ubi->wl_lock); in ubi_do_worker()
545 ubi_err(ubi, "%s: work failed with error code %d", in ubi_do_worker()
546 ubi->bgt_name, err); in ubi_do_worker()
549 spin_unlock(&ubi->wl_lock); in ubi_do_worker()
555 * @ubi: UBI device description object
559 * list. Can only be used if ubi->work_sem is already held in read mode!
561 static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) in __schedule_ubi_work() argument
563 spin_lock(&ubi->wl_lock); in __schedule_ubi_work()
564 list_add_tail(&wrk->list, &ubi->works); in __schedule_ubi_work()
565 ubi_assert(ubi->works_count >= 0); in __schedule_ubi_work()
566 ubi->works_count += 1; in __schedule_ubi_work()
568 if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi)) in __schedule_ubi_work()
569 wake_up_process(ubi->bgt_thread); in __schedule_ubi_work()
571 spin_unlock(&ubi->wl_lock); in __schedule_ubi_work()
576 * @ubi: UBI device description object
582 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) in schedule_ubi_work() argument
584 down_read(&ubi->work_sem); in schedule_ubi_work()
585 __schedule_ubi_work(ubi, wrk); in schedule_ubi_work()
586 up_read(&ubi->work_sem); in schedule_ubi_work()
589 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
594 * @ubi: UBI device description object
603 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in schedule_erase() argument
623 schedule_ubi_work(ubi, wl_wrk); in schedule_erase()
626 ubi_do_worker(ubi); in schedule_erase()
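The elided body of schedule_erase() packages the PEB into a struct ubi_work; reconstructed from the mainline source (note that this tree additionally calls ubi_do_worker() at line 626, so queued work may be processed synchronously when no background thread runs):

	struct ubi_work *wl_wrk;

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);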
633 * @ubi: UBI device description object
640 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in do_sync_erase() argument
656 return erase_worker(ubi, wl_wrk, 0); in do_sync_erase()
661 * @ubi: UBI device description object
670 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, argument
685 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
689 mutex_lock(&ubi->move_mutex);
690 spin_lock(&ubi->wl_lock);
691 ubi_assert(!ubi->move_from && !ubi->move_to);
692 ubi_assert(!ubi->move_to_put);
694 if (!ubi->free.rb_node ||
695 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
703 * @ubi->used tree later and the wear-leveling will be
707 !ubi->free.rb_node, !ubi->used.rb_node);
714 anchor = !anchor_pebs_avalible(&ubi->free);
717 e1 = find_anchor_wl_entry(&ubi->used);
720 e2 = get_peb_for_wl(ubi);
724 self_check_in_wl_tree(ubi, e1, &ubi->used);
725 rb_erase(&e1->u.rb, &ubi->used);
727 } else if (!ubi->scrub.rb_node) {
729 if (!ubi->scrub.rb_node) {
736 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
737 e2 = get_peb_for_wl(ubi);
746 wl_tree_add(e2, &ubi->free);
747 ubi->free_count++;
750 self_check_in_wl_tree(ubi, e1, &ubi->used);
751 rb_erase(&e1->u.rb, &ubi->used);
757 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
758 e2 = get_peb_for_wl(ubi);
762 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
763 rb_erase(&e1->u.rb, &ubi->scrub);
767 ubi->move_from = e1;
768 ubi->move_to = e2;
769 spin_unlock(&ubi->wl_lock);
782 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
786 * We are trying to move PEB without a VID header. UBI
810 ubi_err(ubi, "error %d while reading VID header from PEB %d",
818 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
850 * put this PEB to the @ubi->erroneous list to prevent
851 * UBI from trying to move it over and over again.
853 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
854 ubi_err(ubi, "too many erroneous eraseblocks (%d)",
855 ubi->erroneous_peb_count);
870 ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
872 ubi_free_vid_hdr(ubi, vid_hdr);
874 spin_lock(&ubi->wl_lock);
875 if (!ubi->move_to_put) {
876 wl_tree_add(e2, &ubi->used);
879 ubi->move_from = ubi->move_to = NULL;
880 ubi->move_to_put = ubi->wl_scheduled = 0;
881 spin_unlock(&ubi->wl_lock);
883 err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
886 wl_entry_destroy(ubi, e2);
897 err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
903 mutex_unlock(&ubi->move_mutex);
918 spin_lock(&ubi->wl_lock);
920 prot_queue_add(ubi, e1);
922 wl_tree_add(e1, &ubi->erroneous);
923 ubi->erroneous_peb_count += 1;
925 wl_tree_add(e1, &ubi->scrub);
927 wl_tree_add(e1, &ubi->used);
928 ubi_assert(!ubi->move_to_put);
929 ubi->move_from = ubi->move_to = NULL;
930 ubi->wl_scheduled = 0;
931 spin_unlock(&ubi->wl_lock);
933 ubi_free_vid_hdr(ubi, vid_hdr);
934 err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
938 mutex_unlock(&ubi->move_mutex);
943 ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
946 ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
948 spin_lock(&ubi->wl_lock);
949 ubi->move_from = ubi->move_to = NULL;
950 ubi->move_to_put = ubi->wl_scheduled = 0;
951 spin_unlock(&ubi->wl_lock);
953 ubi_free_vid_hdr(ubi, vid_hdr);
954 wl_entry_destroy(ubi, e1);
955 wl_entry_destroy(ubi, e2);
958 ubi_ro_mode(ubi);
959 mutex_unlock(&ubi->move_mutex);
964 ubi->wl_scheduled = 0;
965 spin_unlock(&ubi->wl_lock);
966 mutex_unlock(&ubi->move_mutex);
967 ubi_free_vid_hdr(ubi, vid_hdr);
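wear_leveling_worker() is the longest function in the file and the matches show it only in slivers. A heavily condensed outline of its success path (a sketch; error, cancellation and fastmap-anchor paths omitted, and pick_victim() is a hypothetical stand-in for the used/scrub selection logic at lines 714-763):

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	e1 = pick_victim(ubi);		/* most-worn used PEB, or scrub victim */
	e2 = get_peb_for_wl(ubi);	/* least-worn free PEB */
	ubi->move_from = e1;		/* publish the in-flight move */
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (!err)
		err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (!err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e2, &ubi->used);	/* the data now lives on e2 */
		ubi->move_from = ubi->move_to = NULL;
		spin_unlock(&ubi->wl_lock);
		err = do_sync_erase(ubi, e1, vol_id, lnum, 0);	/* recycle e1 */
	}
	mutex_unlock(&ubi->move_mutex);

Publishing @move_from/@move_to under wl_lock is what lets ubi_wl_put_peb() and ubi_wl_scrub_peb() (lines 1213-1238 and 1299-1312 below) detect and wait out a copy that is in flight.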
973 * @ubi: UBI device description object
974 * @nested: set to non-zero if this function is called from UBI worker
980 static int ensure_wear_leveling(struct ubi_device *ubi, int nested) argument
987 spin_lock(&ubi->wl_lock);
988 if (ubi->wl_scheduled)
993 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
996 if (!ubi->scrub.rb_node) {
997 if (!ubi->used.rb_node || !ubi->free.rb_node)
1007 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1008 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1016 ubi->wl_scheduled = 1;
1017 spin_unlock(&ubi->wl_lock);
1028 __schedule_ubi_work(ubi, wrk);
1031 schedule_ubi_work(ubi, wrk);
1034 schedule_ubi_work(ubi, wrk);
1035 ubi_do_worker(ubi);
1041 spin_lock(&ubi->wl_lock);
1042 ubi->wl_scheduled = 0;
1044 spin_unlock(&ubi->wl_lock);
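The decision point of ensure_wear_leveling() falls between the matches at lines 1008 and 1016: wear-leveling is scheduled only if the gap between the lowest used EC and the highest free EC is large enough. Reconstructed:

	if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
		goto out_unlock;	/* trees are balanced, nothing to do */

	dbg_wl("schedule wear-leveling");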
1050 * @ubi: UBI device description object
1060 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, argument
1072 wl_entry_destroy(ubi, e);
1079 err = sync_erase(ubi, e, wl_wrk->torture);
1084 spin_lock(&ubi->wl_lock);
1085 wl_tree_add(e, &ubi->free);
1086 ubi->free_count++;
1087 spin_unlock(&ubi->wl_lock);
1093 serve_prot_queue(ubi);
1096 err = ensure_wear_leveling(ubi, 1);
1100 ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
1108 err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
1116 wl_entry_destroy(ubi, e);
1127 if (!ubi->bad_allowed) {
1128 ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
1132 spin_lock(&ubi->volumes_lock);
1133 if (ubi->beb_rsvd_pebs == 0) {
1134 if (ubi->avail_pebs == 0) {
1135 spin_unlock(&ubi->volumes_lock);
1136 ubi_err(ubi, "no reserved/available physical eraseblocks");
1139 ubi->avail_pebs -= 1;
1142 spin_unlock(&ubi->volumes_lock);
1144 ubi_msg(ubi, "mark PEB %d as bad", pnum);
1145 err = ubi_io_mark_bad(ubi, pnum);
1149 spin_lock(&ubi->volumes_lock);
1150 if (ubi->beb_rsvd_pebs > 0) {
1156 ubi->avail_pebs += 1;
1159 ubi->beb_rsvd_pebs -= 1;
1161 ubi->bad_peb_count += 1;
1162 ubi->good_peb_count -= 1;
1163 ubi_calculate_reserved(ubi);
1165 ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
1166 else if (ubi->beb_rsvd_pebs)
1167 ubi_msg(ubi, "%d PEBs left in the reserve",
1168 ubi->beb_rsvd_pebs);
1170 ubi_warn(ubi, "last PEB from the reserve was used");
1171 spin_unlock(&ubi->volumes_lock);
1177 spin_lock(&ubi->volumes_lock);
1178 ubi->avail_pebs += 1;
1179 spin_unlock(&ubi->volumes_lock);
1181 ubi_ro_mode(ubi);
1187 * @ubi: UBI device description object
1198 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, argument
1206 ubi_assert(pnum < ubi->peb_count);
1208 down_read(&ubi->fm_protect);
1211 spin_lock(&ubi->wl_lock);
1212 e = ubi->lookuptbl[pnum];
1213 if (e == ubi->move_from) {
1220 spin_unlock(&ubi->wl_lock);
1222 /* Wait for the WL worker by taking the @ubi->move_mutex */
1223 mutex_lock(&ubi->move_mutex);
1224 mutex_unlock(&ubi->move_mutex);
1226 } else if (e == ubi->move_to) {
1237 ubi_assert(!ubi->move_to_put);
1238 ubi->move_to_put = 1;
1239 spin_unlock(&ubi->wl_lock);
1240 up_read(&ubi->fm_protect);
1243 if (in_wl_tree(e, &ubi->used)) {
1244 self_check_in_wl_tree(ubi, e, &ubi->used);
1245 rb_erase(&e->u.rb, &ubi->used);
1246 } else if (in_wl_tree(e, &ubi->scrub)) {
1247 self_check_in_wl_tree(ubi, e, &ubi->scrub);
1248 rb_erase(&e->u.rb, &ubi->scrub);
1249 } else if (in_wl_tree(e, &ubi->erroneous)) {
1250 self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1251 rb_erase(&e->u.rb, &ubi->erroneous);
1252 ubi->erroneous_peb_count -= 1;
1253 ubi_assert(ubi->erroneous_peb_count >= 0);
1257 err = prot_queue_del(ubi, e->pnum);
1259 ubi_err(ubi, "PEB %d not found", pnum);
1260 ubi_ro_mode(ubi);
1261 spin_unlock(&ubi->wl_lock);
1262 up_read(&ubi->fm_protect);
1267 spin_unlock(&ubi->wl_lock);
1269 err = schedule_erase(ubi, e, vol_id, lnum, torture);
1271 spin_lock(&ubi->wl_lock);
1272 wl_tree_add(e, &ubi->used);
1273 spin_unlock(&ubi->wl_lock);
1276 up_read(&ubi->fm_protect);
1282 * @ubi: UBI device description object
1290 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) argument
1294 ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
1297 spin_lock(&ubi->wl_lock);
1298 e = ubi->lookuptbl[pnum];
1299 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1300 in_wl_tree(e, &ubi->erroneous)) {
1301 spin_unlock(&ubi->wl_lock);
1305 if (e == ubi->move_to) {
1312 spin_unlock(&ubi->wl_lock);
1318 if (in_wl_tree(e, &ubi->used)) {
1319 self_check_in_wl_tree(ubi, e, &ubi->used);
1320 rb_erase(&e->u.rb, &ubi->used);
1324 err = prot_queue_del(ubi, e->pnum);
1326 ubi_err(ubi, "PEB %d not found", pnum);
1327 ubi_ro_mode(ubi);
1328 spin_unlock(&ubi->wl_lock);
1333 wl_tree_add(e, &ubi->scrub);
1334 spin_unlock(&ubi->wl_lock);
1340 return ensure_wear_leveling(ubi, 0);
1345 * @ubi: UBI device description object
1355 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) argument
1365 vol_id, lnum, ubi->works_count);
1371 down_read(&ubi->work_sem);
1372 spin_lock(&ubi->wl_lock);
1373 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1377 ubi->works_count -= 1;
1378 ubi_assert(ubi->works_count >= 0);
1379 spin_unlock(&ubi->wl_lock);
1381 err = wrk->func(ubi, wrk, 0);
1383 up_read(&ubi->work_sem);
1387 spin_lock(&ubi->wl_lock);
1392 spin_unlock(&ubi->wl_lock);
1393 up_read(&ubi->work_sem);
1400 down_write(&ubi->work_sem);
1401 up_write(&ubi->work_sem);
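The down_write/up_write pair at lines 1400-1401 works as a barrier precisely because do_work() holds @ubi->work_sem for read across each work item. A user-space analogue of the idiom (hypothetical demo using POSIX rwlocks in place of a kernel rwsem):

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER;

	static void *worker(void *arg)
	{
		pthread_rwlock_rdlock(&work_sem);	/* like down_read() in do_work() */
		puts("worker: running");
		sleep(1);				/* simulate one work item */
		puts("worker: done");
		pthread_rwlock_unlock(&work_sem);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, worker, NULL);
		usleep(100 * 1000);	/* let the worker take the read lock */

		/* the down_write()/up_write() barrier: returns only once no
		 * worker still holds the lock for read */
		pthread_rwlock_wrlock(&work_sem);
		pthread_rwlock_unlock(&work_sem);
		puts("flush: all in-flight work finished");

		pthread_join(t, NULL);
		return 0;
	}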
1408 * @ubi: UBI device description object
1411 static void tree_destroy(struct ubi_device *ubi, struct rb_root *root) argument
1433 wl_entry_destroy(ubi, e);
1439 * ubi_thread - UBI background thread.
1440 * @u: the UBI device description object pointer
1445 struct ubi_device *ubi = u; local
1447 ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1448 ubi->bgt_name, task_pid_nr(current));
1460 spin_lock(&ubi->wl_lock);
1461 if (list_empty(&ubi->works) || ubi->ro_mode ||
1462 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1464 spin_unlock(&ubi->wl_lock);
1468 spin_unlock(&ubi->wl_lock);
1470 err = do_work(ubi);
1472 ubi_err(ubi, "%s: work failed with error code %d",
1473 ubi->bgt_name, err);
1479 ubi_msg(ubi, "%s: %d consecutive failures",
1480 ubi->bgt_name, WL_MAX_FAILURES);
1481 ubi_ro_mode(ubi);
1482 ubi->thread_enabled = 0;
1491 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
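The idle branch of the ubi_thread() loop (matched only partially above) shows the classic lost-wakeup guard: the thread marks itself TASK_INTERRUPTIBLE before dropping wl_lock, so the wake_up_process() issued under wl_lock by __schedule_ubi_work() (line 569) can never slip between the check and the sleep. Reconstructed:

	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works) || ubi->ro_mode ||
	    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
		set_current_state(TASK_INTERRUPTIBLE);	/* before the unlock! */
		spin_unlock(&ubi->wl_lock);
		schedule();
		continue;
	}
	spin_unlock(&ubi->wl_lock);

	err = do_work(ubi);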
1497 * @ubi: UBI device description object
1499 static void shutdown_work(struct ubi_device *ubi) argument
1503 flush_work(&ubi->fm_work);
1508 while (!list_empty(&ubi->works)) {
1511 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1513 wrk->func(ubi, wrk, 1);
1514 ubi->works_count -= 1;
1515 ubi_assert(ubi->works_count >= 0);
1521 * @ubi: UBI device description object
1527 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) argument
1535 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1536 spin_lock_init(&ubi->wl_lock);
1537 mutex_init(&ubi->move_mutex);
1538 init_rwsem(&ubi->work_sem);
1539 ubi->max_ec = ai->max_ec;
1540 INIT_LIST_HEAD(&ubi->works);
1542 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1545 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1546 if (!ubi->lookuptbl)
1550 INIT_LIST_HEAD(&ubi->pq[i]);
1551 ubi->pq_head = 0;
1553 ubi->free_count = 0;
1563 ubi->lookuptbl[e->pnum] = e;
1564 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
1565 wl_entry_destroy(ubi, e);
1583 wl_tree_add(e, &ubi->free);
1584 ubi->free_count++;
1586 ubi->lookuptbl[e->pnum] = e;
1601 ubi->lookuptbl[e->pnum] = e;
1606 wl_tree_add(e, &ubi->used);
1610 wl_tree_add(e, &ubi->scrub);
1619 if (ubi->fm) {
1620 ubi_assert(ubi->good_peb_count ==
1621 found_pebs + ubi->fm->used_blocks);
1623 for (i = 0; i < ubi->fm->used_blocks; i++) {
1624 e = ubi->fm->e[i];
1625 ubi->lookuptbl[e->pnum] = e;
1629 ubi_assert(ubi->good_peb_count == found_pebs);
1632 ubi_fastmap_init(ubi, &reserved_pebs);
1634 if (ubi->avail_pebs < reserved_pebs) {
1635 ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
1636 ubi->avail_pebs, reserved_pebs);
1637 if (ubi->corr_peb_count)
1638 ubi_err(ubi, "%d PEBs are corrupted and not used",
1639 ubi->corr_peb_count);
1642 ubi->avail_pebs -= reserved_pebs;
1643 ubi->rsvd_pebs += reserved_pebs;
1646 err = ensure_wear_leveling(ubi, 0);
1653 shutdown_work(ubi);
1654 tree_destroy(ubi, &ubi->used);
1655 tree_destroy(ubi, &ubi->free);
1656 tree_destroy(ubi, &ubi->scrub);
1657 kfree(ubi->lookuptbl);
1663 * @ubi: UBI device description object
1665 static void protection_queue_destroy(struct ubi_device *ubi) argument
1671 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1673 wl_entry_destroy(ubi, e);
1680 * @ubi: UBI device description object
1682 void ubi_wl_close(struct ubi_device *ubi) argument
1685 ubi_fastmap_close(ubi);
1686 shutdown_work(ubi);
1687 protection_queue_destroy(ubi);
1688 tree_destroy(ubi, &ubi->used);
1689 tree_destroy(ubi, &ubi->erroneous);
1690 tree_destroy(ubi, &ubi->free);
1691 tree_destroy(ubi, &ubi->scrub);
1692 kfree(ubi->lookuptbl);
1697 * @ubi: UBI device description object
1705 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec) argument
1711 if (!ubi_dbg_chk_gen(ubi))
1714 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1718 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1727 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1728 ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
1741 * @ubi: UBI device description object
1748 static int self_check_in_wl_tree(const struct ubi_device *ubi, argument
1751 if (!ubi_dbg_chk_gen(ubi))
1757 ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
1766 * @ubi: UBI device description object
1769 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
1771 static int self_check_in_pq(const struct ubi_device *ubi, argument
1777 if (!ubi_dbg_chk_gen(ubi))
1781 list_for_each_entry(p, &ubi->pq[i], u.list)
1785 ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
1791 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) argument
1795 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1796 self_check_in_wl_tree(ubi, e, &ubi->free);
1797 ubi->free_count--;
1798 ubi_assert(ubi->free_count >= 0);
1799 rb_erase(&e->u.rb, &ubi->free);
1806 * @ubi: UBI device description object
1813 static int produce_free_peb(struct ubi_device *ubi) argument
1817 while (!ubi->free.rb_node && ubi->works_count) {
1818 spin_unlock(&ubi->wl_lock);
1821 err = do_work(ubi);
1823 spin_lock(&ubi->wl_lock);
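produce_free_peb() is called with wl_lock held and drops it around each synchronous work item; the function is short enough to reconstruct in full:

	static int produce_free_peb(struct ubi_device *ubi)
	{
		int err;

		while (!ubi->free.rb_node && ubi->works_count) {
			spin_unlock(&ubi->wl_lock);

			dbg_wl("do one work synchronously");
			err = do_work(ubi);

			spin_lock(&ubi->wl_lock);
			if (err)
				return err;
		}

		return 0;
	}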
1833 * @ubi: UBI device description object
1837 * Returns with ubi->fm_eba_sem held in read mode!
1839 int ubi_wl_get_peb(struct ubi_device *ubi) argument
1845 down_read(&ubi->fm_eba_sem);
1846 spin_lock(&ubi->wl_lock);
1847 if (!ubi->free.rb_node) {
1848 if (ubi->works_count == 0) {
1849 ubi_err(ubi, "no free eraseblocks");
1850 ubi_assert(list_empty(&ubi->works));
1851 spin_unlock(&ubi->wl_lock);
1855 err = produce_free_peb(ubi);
1857 spin_unlock(&ubi->wl_lock);
1860 spin_unlock(&ubi->wl_lock);
1861 up_read(&ubi->fm_eba_sem);
1865 e = wl_get_wle(ubi);
1866 prot_queue_add(ubi, e);
1867 spin_unlock(&ubi->wl_lock);
1869 err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
1870 ubi->peb_size - ubi->vid_hdr_aloffset);
1872 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
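The listing ends inside ubi_wl_get_peb(); its reconstructed tail returns the PEB number with @ubi->fm_eba_sem still held for read, as the comment at line 1837 promises:

	if (err) {
		/* the matched ubi_err() at line 1872 fires here */
		return err;
	}

	return e->pnum;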