/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As it was said, for the UBI sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
 * used eraseblocks are kept in the @wl->used, @wl->erroneous, or @wl->scrub
 * RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 * o we don't want to move physical eraseblocks just after we have given them
 *   to the user; instead, we first want to let users fill them up with data;
 *
 * o there is a chance that the user will put the physical eraseblock very
 *   soon, so it makes sense not to move it for some time, but to wait.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case.
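 *
 * For example (an illustration of the above, not new behaviour): with a
 * protection queue of length %UBI_PROT_QUEUE_LEN, a PEB handed out by
 * 'ubi_wl_get_peb()' now becomes eligible for wear-leveling again only after
 * roughly %UBI_PROT_QUEUE_LEN further erase operations have completed
 * anywhere on this UBI device - see 'prot_queue_add()' and
 * 'serve_prot_queue()' below for the head/tail arithmetic behind this.
 *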
 * This is implemented with help of the protection queue. Eraseblocks are put
 * to the tail of this queue when they are returned by 'ubi_wl_get_peb()', and
 * eraseblocks are removed from the head of the queue on each erase operation
 * (for any eraseblock). So the length of the queue defines for how many
 * (global) erase cycles PEBs stay protected.
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split up into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with low erase counter, and we need to pick the
 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This
 * leaves room for future re-works of the WL sub-system.
 */

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the
 * target physical eraseblock to move to. The simplest way would be just to
 * pick the one with the highest erase counter. But in certain workloads this
 * could lead to unlimited wear of one or a few physical eraseblocks. Indeed,
 * imagine a situation when the picked physical eraseblock is constantly
 * erased after the data is written to it. So, we have a constant which limits
 * the highest erase counter of the free physical eraseblock to pick. Namely,
 * the WL sub-system does not pick eraseblocks with erase counter greater than
 * the lowest erase counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
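 *
 * (Concretely: 'ubi_thread()' below counts consecutive 'do_work()' failures
 * and, once the count exceeds this limit, switches the device to read-only
 * mode via 'ubi_ro_mode()' and disables itself; any successful work item
 * resets the counter.)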
134 */ 135 #define WL_MAX_FAILURES 32 136 137 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec); 138 static int self_check_in_wl_tree(const struct ubi_device *ubi, 139 struct ubi_wl_entry *e, struct rb_root *root); 140 static int self_check_in_pq(const struct ubi_device *ubi, 141 struct ubi_wl_entry *e); 142 143 #ifdef CONFIG_MTD_UBI_FASTMAP 144 /** 145 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue 146 * @wrk: the work description object 147 */ 148 static void update_fastmap_work_fn(struct work_struct *wrk) 149 { 150 struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work); 151 ubi_update_fastmap(ubi); 152 } 153 154 /** 155 * ubi_ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap. 156 * @ubi: UBI device description object 157 * @pnum: the to be checked PEB 158 */ 159 static int ubi_is_fm_block(struct ubi_device *ubi, int pnum) 160 { 161 int i; 162 163 if (!ubi->fm) 164 return 0; 165 166 for (i = 0; i < ubi->fm->used_blocks; i++) 167 if (ubi->fm->e[i]->pnum == pnum) 168 return 1; 169 170 return 0; 171 } 172 #else 173 static int ubi_is_fm_block(struct ubi_device *ubi, int pnum) 174 { 175 return 0; 176 } 177 #endif 178 179 /** 180 * wl_tree_add - add a wear-leveling entry to a WL RB-tree. 181 * @e: the wear-leveling entry to add 182 * @root: the root of the tree 183 * 184 * Note, we use (erase counter, physical eraseblock number) pairs as keys in 185 * the @ubi->used and @ubi->free RB-trees. 186 */ 187 static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root) 188 { 189 struct rb_node **p, *parent = NULL; 190 191 p = &root->rb_node; 192 while (*p) { 193 struct ubi_wl_entry *e1; 194 195 parent = *p; 196 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb); 197 198 if (e->ec < e1->ec) 199 p = &(*p)->rb_left; 200 else if (e->ec > e1->ec) 201 p = &(*p)->rb_right; 202 else { 203 ubi_assert(e->pnum != e1->pnum); 204 if (e->pnum < e1->pnum) 205 p = &(*p)->rb_left; 206 else 207 p = &(*p)->rb_right; 208 } 209 } 210 211 rb_link_node(&e->u.rb, parent, p); 212 rb_insert_color(&e->u.rb, root); 213 } 214 215 /** 216 * do_work - do one pending work. 217 * @ubi: UBI device description object 218 * 219 * This function returns zero in case of success and a negative error code in 220 * case of failure. 221 */ 222 static int do_work(struct ubi_device *ubi) 223 { 224 int err; 225 struct ubi_work *wrk; 226 227 cond_resched(); 228 229 /* 230 * @ubi->work_sem is used to synchronize with the workers. Workers take 231 * it in read mode, so many of them may be doing works at a time. But 232 * the queue flush code has to be sure the whole queue of works is 233 * done, and it takes the mutex in write mode. 234 */ 235 down_read(&ubi->work_sem); 236 spin_lock(&ubi->wl_lock); 237 if (list_empty(&ubi->works)) { 238 spin_unlock(&ubi->wl_lock); 239 up_read(&ubi->work_sem); 240 return 0; 241 } 242 243 wrk = list_entry(ubi->works.next, struct ubi_work, list); 244 list_del(&wrk->list); 245 ubi->works_count -= 1; 246 ubi_assert(ubi->works_count >= 0); 247 spin_unlock(&ubi->wl_lock); 248 249 /* 250 * Call the worker function. Do not touch the work structure 251 * after this call as it will have been freed or reused by that 252 * time by the worker function. 253 */ 254 err = wrk->func(ubi, wrk, 0); 255 if (err) 256 ubi_err(ubi, "work failed with error code %d", err); 257 up_read(&ubi->work_sem); 258 259 return err; 260 } 261 262 /** 263 * produce_free_peb - produce a free physical eraseblock. 
264 * @ubi: UBI device description object 265 * 266 * This function tries to make a free PEB by means of synchronous execution of 267 * pending works. This may be needed if, for example the background thread is 268 * disabled. Returns zero in case of success and a negative error code in case 269 * of failure. 270 */ 271 static int produce_free_peb(struct ubi_device *ubi) 272 { 273 int err; 274 275 while (!ubi->free.rb_node && ubi->works_count) { 276 spin_unlock(&ubi->wl_lock); 277 278 dbg_wl("do one work synchronously"); 279 err = do_work(ubi); 280 281 spin_lock(&ubi->wl_lock); 282 if (err) 283 return err; 284 } 285 286 return 0; 287 } 288 289 /** 290 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree. 291 * @e: the wear-leveling entry to check 292 * @root: the root of the tree 293 * 294 * This function returns non-zero if @e is in the @root RB-tree and zero if it 295 * is not. 296 */ 297 static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root) 298 { 299 struct rb_node *p; 300 301 p = root->rb_node; 302 while (p) { 303 struct ubi_wl_entry *e1; 304 305 e1 = rb_entry(p, struct ubi_wl_entry, u.rb); 306 307 if (e->pnum == e1->pnum) { 308 ubi_assert(e == e1); 309 return 1; 310 } 311 312 if (e->ec < e1->ec) 313 p = p->rb_left; 314 else if (e->ec > e1->ec) 315 p = p->rb_right; 316 else { 317 ubi_assert(e->pnum != e1->pnum); 318 if (e->pnum < e1->pnum) 319 p = p->rb_left; 320 else 321 p = p->rb_right; 322 } 323 } 324 325 return 0; 326 } 327 328 /** 329 * prot_queue_add - add physical eraseblock to the protection queue. 330 * @ubi: UBI device description object 331 * @e: the physical eraseblock to add 332 * 333 * This function adds @e to the tail of the protection queue @ubi->pq, where 334 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be 335 * temporarily protected from the wear-leveling worker. Note, @wl->lock has to 336 * be locked. 337 */ 338 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) 339 { 340 int pq_tail = ubi->pq_head - 1; 341 342 if (pq_tail < 0) 343 pq_tail = UBI_PROT_QUEUE_LEN - 1; 344 ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN); 345 list_add_tail(&e->u.list, &ubi->pq[pq_tail]); 346 dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec); 347 } 348 349 /** 350 * find_wl_entry - find wear-leveling entry closest to certain erase counter. 351 * @ubi: UBI device description object 352 * @root: the RB-tree where to look for 353 * @diff: maximum possible difference from the smallest erase counter 354 * 355 * This function looks for a wear leveling entry with erase counter closest to 356 * min + @diff, where min is the smallest erase counter. 357 */ 358 static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi, 359 struct rb_root *root, int diff) 360 { 361 struct rb_node *p; 362 struct ubi_wl_entry *e, *prev_e = NULL; 363 int max; 364 365 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); 366 max = e->ec + diff; 367 368 p = root->rb_node; 369 while (p) { 370 struct ubi_wl_entry *e1; 371 372 e1 = rb_entry(p, struct ubi_wl_entry, u.rb); 373 if (e1->ec >= max) 374 p = p->rb_left; 375 else { 376 p = p->rb_right; 377 prev_e = e; 378 e = e1; 379 } 380 } 381 382 /* If no fastmap has been written and this WL entry can be used 383 * as anchor PEB, hold it back and return the second best WL entry 384 * such that fastmap can use the anchor PEB later. 
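 *
 * (Usage sketch for 'find_wl_entry()', for illustration only: the non-fastmap
 * 'get_peb_for_wl()' and 'ensure_wear_leveling()' below call it as
 *
 *	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
 *
 * i.e. they ask for roughly the most-worn free PEB whose erase counter still
 * stays within %WL_FREE_MAX_DIFF of the least-worn free one, and use it as
 * the wear-leveling target.)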
*/ 385 if (prev_e && !ubi->fm_disabled && 386 !ubi->fm && e->pnum < UBI_FM_MAX_START) 387 return prev_e; 388 389 return e; 390 } 391 392 /** 393 * find_mean_wl_entry - find wear-leveling entry with medium erase counter. 394 * @ubi: UBI device description object 395 * @root: the RB-tree where to look for 396 * 397 * This function looks for a wear leveling entry with medium erase counter, 398 * but not greater or equivalent than the lowest erase counter plus 399 * %WL_FREE_MAX_DIFF/2. 400 */ 401 static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi, 402 struct rb_root *root) 403 { 404 struct ubi_wl_entry *e, *first, *last; 405 406 first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); 407 last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb); 408 409 if (last->ec - first->ec < WL_FREE_MAX_DIFF) { 410 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb); 411 412 #ifdef CONFIG_MTD_UBI_FASTMAP 413 /* If no fastmap has been written and this WL entry can be used 414 * as anchor PEB, hold it back and return the second best 415 * WL entry such that fastmap can use the anchor PEB later. */ 416 if (e && !ubi->fm_disabled && !ubi->fm && 417 e->pnum < UBI_FM_MAX_START) 418 e = rb_entry(rb_next(root->rb_node), 419 struct ubi_wl_entry, u.rb); 420 #endif 421 } else 422 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2); 423 424 return e; 425 } 426 427 #ifdef CONFIG_MTD_UBI_FASTMAP 428 /** 429 * find_anchor_wl_entry - find wear-leveling entry to used as anchor PEB. 430 * @root: the RB-tree where to look for 431 */ 432 static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root) 433 { 434 struct rb_node *p; 435 struct ubi_wl_entry *e, *victim = NULL; 436 int max_ec = UBI_MAX_ERASECOUNTER; 437 438 ubi_rb_for_each_entry(p, e, root, u.rb) { 439 if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) { 440 victim = e; 441 max_ec = e->ec; 442 } 443 } 444 445 return victim; 446 } 447 448 static int anchor_pebs_avalible(struct rb_root *root) 449 { 450 struct rb_node *p; 451 struct ubi_wl_entry *e; 452 453 ubi_rb_for_each_entry(p, e, root, u.rb) 454 if (e->pnum < UBI_FM_MAX_START) 455 return 1; 456 457 return 0; 458 } 459 460 /** 461 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number. 462 * @ubi: UBI device description object 463 * @anchor: This PEB will be used as anchor PEB by fastmap 464 * 465 * The function returns a physical erase block with a given maximal number 466 * and removes it from the wl subsystem. 467 * Must be called with wl_lock held! 468 */ 469 struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor) 470 { 471 struct ubi_wl_entry *e = NULL; 472 473 if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1)) { 474 ubi_warn(ubi, "Can't get peb for fastmap:anchor=%d, free_cnt=%d, reserved=%d", 475 anchor, ubi->free_count, ubi->beb_rsvd_pebs); 476 goto out; 477 } 478 479 if (anchor) 480 e = find_anchor_wl_entry(&ubi->free); 481 else 482 e = find_mean_wl_entry(ubi, &ubi->free); 483 484 if (!e) 485 goto out; 486 487 self_check_in_wl_tree(ubi, e, &ubi->free); 488 489 /* remove it from the free list, 490 * the wl subsystem does no longer know this erase block */ 491 rb_erase(&e->u.rb, &ubi->free); 492 ubi->free_count--; 493 out: 494 return e; 495 } 496 #endif 497 498 /** 499 * __wl_get_peb - get a physical eraseblock. 500 * @ubi: UBI device description object 501 * 502 * This function returns a physical eraseblock in case of success and a 503 * negative error code in case of failure. 
504 */ 505 static int __wl_get_peb(struct ubi_device *ubi) 506 { 507 int err; 508 struct ubi_wl_entry *e; 509 510 retry: 511 if (!ubi->free.rb_node) { 512 if (ubi->works_count == 0) { 513 ubi_err(ubi, "no free eraseblocks"); 514 ubi_assert(list_empty(&ubi->works)); 515 return -ENOSPC; 516 } 517 518 err = produce_free_peb(ubi); 519 if (err < 0) 520 return err; 521 goto retry; 522 } 523 524 e = find_mean_wl_entry(ubi, &ubi->free); 525 if (!e) { 526 ubi_err(ubi, "no free eraseblocks"); 527 return -ENOSPC; 528 } 529 530 self_check_in_wl_tree(ubi, e, &ubi->free); 531 532 /* 533 * Move the physical eraseblock to the protection queue where it will 534 * be protected from being moved for some time. 535 */ 536 rb_erase(&e->u.rb, &ubi->free); 537 ubi->free_count--; 538 dbg_wl("PEB %d EC %d", e->pnum, e->ec); 539 #ifndef CONFIG_MTD_UBI_FASTMAP 540 /* We have to enqueue e only if fastmap is disabled, 541 * is fastmap enabled prot_queue_add() will be called by 542 * ubi_wl_get_peb() after removing e from the pool. */ 543 prot_queue_add(ubi, e); 544 #endif 545 return e->pnum; 546 } 547 548 #ifdef CONFIG_MTD_UBI_FASTMAP 549 /** 550 * return_unused_pool_pebs - returns unused PEB to the free tree. 551 * @ubi: UBI device description object 552 * @pool: fastmap pool description object 553 */ 554 static void return_unused_pool_pebs(struct ubi_device *ubi, 555 struct ubi_fm_pool *pool) 556 { 557 int i; 558 struct ubi_wl_entry *e; 559 560 for (i = pool->used; i < pool->size; i++) { 561 e = ubi->lookuptbl[pool->pebs[i]]; 562 wl_tree_add(e, &ubi->free); 563 ubi->free_count++; 564 } 565 } 566 567 /** 568 * refill_wl_pool - refills all the fastmap pool used by the 569 * WL sub-system. 570 * @ubi: UBI device description object 571 */ 572 static void refill_wl_pool(struct ubi_device *ubi) 573 { 574 struct ubi_wl_entry *e; 575 struct ubi_fm_pool *pool = &ubi->fm_wl_pool; 576 577 return_unused_pool_pebs(ubi, pool); 578 579 for (pool->size = 0; pool->size < pool->max_size; pool->size++) { 580 if (!ubi->free.rb_node || 581 (ubi->free_count - ubi->beb_rsvd_pebs < 5)) 582 break; 583 584 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); 585 self_check_in_wl_tree(ubi, e, &ubi->free); 586 rb_erase(&e->u.rb, &ubi->free); 587 ubi->free_count--; 588 589 pool->pebs[pool->size] = e->pnum; 590 } 591 pool->used = 0; 592 } 593 594 /** 595 * refill_wl_user_pool - refills all the fastmap pool used by ubi_wl_get_peb. 596 * @ubi: UBI device description object 597 */ 598 static void refill_wl_user_pool(struct ubi_device *ubi) 599 { 600 struct ubi_fm_pool *pool = &ubi->fm_pool; 601 602 return_unused_pool_pebs(ubi, pool); 603 604 for (pool->size = 0; pool->size < pool->max_size; pool->size++) { 605 pool->pebs[pool->size] = __wl_get_peb(ubi); 606 if (pool->pebs[pool->size] < 0) 607 break; 608 } 609 pool->used = 0; 610 } 611 612 /** 613 * ubi_refill_pools - refills all fastmap PEB pools. 614 * @ubi: UBI device description object 615 */ 616 void ubi_refill_pools(struct ubi_device *ubi) 617 { 618 spin_lock(&ubi->wl_lock); 619 refill_wl_pool(ubi); 620 refill_wl_user_pool(ubi); 621 spin_unlock(&ubi->wl_lock); 622 } 623 624 /* ubi_wl_get_peb - works exaclty like __wl_get_peb but keeps track of 625 * the fastmap pool. 
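 *
 * (Pool mechanics, summarised from the helpers above: 'ubi_refill_pools()'
 * fills @ubi->fm_pool and @ubi->fm_wl_pool with PEB numbers taken from the
 * free tree, and consumers then simply do
 *
 *	pnum = pool->pebs[pool->used++];
 *
 * until @used reaches @size, at which point the fastmap must be updated and
 * the pools refilled.)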
626 */ 627 int ubi_wl_get_peb(struct ubi_device *ubi) 628 { 629 int ret; 630 struct ubi_fm_pool *pool = &ubi->fm_pool; 631 struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool; 632 633 if (!pool->size || !wl_pool->size || pool->used == pool->size || 634 wl_pool->used == wl_pool->size) 635 ubi_update_fastmap(ubi); 636 637 /* we got not a single free PEB */ 638 if (!pool->size) 639 ret = -ENOSPC; 640 else { 641 spin_lock(&ubi->wl_lock); 642 ret = pool->pebs[pool->used++]; 643 prot_queue_add(ubi, ubi->lookuptbl[ret]); 644 spin_unlock(&ubi->wl_lock); 645 } 646 647 return ret; 648 } 649 650 /* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system. 651 * 652 * @ubi: UBI device description object 653 */ 654 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) 655 { 656 struct ubi_fm_pool *pool = &ubi->fm_wl_pool; 657 int pnum; 658 659 if (pool->used == pool->size || !pool->size) { 660 /* We cannot update the fastmap here because this 661 * function is called in atomic context. 662 * Let's fail here and refill/update it as soon as possible. */ 663 schedule_work(&ubi->fm_work); 664 return NULL; 665 } else { 666 pnum = pool->pebs[pool->used++]; 667 return ubi->lookuptbl[pnum]; 668 } 669 } 670 #else 671 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) 672 { 673 struct ubi_wl_entry *e; 674 675 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); 676 self_check_in_wl_tree(ubi, e, &ubi->free); 677 ubi->free_count--; 678 ubi_assert(ubi->free_count >= 0); 679 rb_erase(&e->u.rb, &ubi->free); 680 681 return e; 682 } 683 684 int ubi_wl_get_peb(struct ubi_device *ubi) 685 { 686 int peb, err; 687 688 spin_lock(&ubi->wl_lock); 689 peb = __wl_get_peb(ubi); 690 spin_unlock(&ubi->wl_lock); 691 692 if (peb < 0) 693 return peb; 694 695 err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset, 696 ubi->peb_size - ubi->vid_hdr_aloffset); 697 if (err) { 698 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", 699 peb); 700 return err; 701 } 702 703 return peb; 704 } 705 #endif 706 707 /** 708 * prot_queue_del - remove a physical eraseblock from the protection queue. 709 * @ubi: UBI device description object 710 * @pnum: the physical eraseblock to remove 711 * 712 * This function deletes PEB @pnum from the protection queue and returns zero 713 * in case of success and %-ENODEV if the PEB was not found. 714 */ 715 static int prot_queue_del(struct ubi_device *ubi, int pnum) 716 { 717 struct ubi_wl_entry *e; 718 719 e = ubi->lookuptbl[pnum]; 720 if (!e) 721 return -ENODEV; 722 723 if (self_check_in_pq(ubi, e)) 724 return -ENODEV; 725 726 list_del(&e->u.list); 727 dbg_wl("deleted PEB %d from the protection queue", e->pnum); 728 return 0; 729 } 730 731 /** 732 * sync_erase - synchronously erase a physical eraseblock. 733 * @ubi: UBI device description object 734 * @e: the the physical eraseblock to erase 735 * @torture: if the physical eraseblock has to be tortured 736 * 737 * This function returns zero in case of success and a negative error code in 738 * case of failure. 
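 *
 * (Aside - a minimal sketch of the caller-side PEB lifecycle described in the
 * header comment of this file; the actual callers live in other UBI
 * sub-systems and are not quoted here:
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	if (pnum < 0)
 *		return pnum;
 *	... write a VID header and data to @pnum via the I/O sub-system ...
 *	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
 *
 * where the final put schedules the PEB for erasure and eventually returns it
 * to the free tree.)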
739 */ 740 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, 741 int torture) 742 { 743 int err; 744 struct ubi_ec_hdr *ec_hdr; 745 unsigned long long ec = e->ec; 746 747 dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec); 748 749 err = self_check_ec(ubi, e->pnum, e->ec); 750 if (err) 751 return -EINVAL; 752 753 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); 754 if (!ec_hdr) 755 return -ENOMEM; 756 757 err = ubi_io_sync_erase(ubi, e->pnum, torture); 758 if (err < 0) 759 goto out_free; 760 761 ec += err; 762 if (ec > UBI_MAX_ERASECOUNTER) { 763 /* 764 * Erase counter overflow. Upgrade UBI and use 64-bit 765 * erase counters internally. 766 */ 767 ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu", 768 e->pnum, ec); 769 err = -EINVAL; 770 goto out_free; 771 } 772 773 dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec); 774 775 ec_hdr->ec = cpu_to_be64(ec); 776 777 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); 778 if (err) 779 goto out_free; 780 781 e->ec = ec; 782 spin_lock(&ubi->wl_lock); 783 if (e->ec > ubi->max_ec) 784 ubi->max_ec = e->ec; 785 spin_unlock(&ubi->wl_lock); 786 787 out_free: 788 kfree(ec_hdr); 789 return err; 790 } 791 792 /** 793 * serve_prot_queue - check if it is time to stop protecting PEBs. 794 * @ubi: UBI device description object 795 * 796 * This function is called after each erase operation and removes PEBs from the 797 * tail of the protection queue. These PEBs have been protected for long enough 798 * and should be moved to the used tree. 799 */ 800 static void serve_prot_queue(struct ubi_device *ubi) 801 { 802 struct ubi_wl_entry *e, *tmp; 803 int count; 804 805 /* 806 * There may be several protected physical eraseblock to remove, 807 * process them all. 808 */ 809 repeat: 810 count = 0; 811 spin_lock(&ubi->wl_lock); 812 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) { 813 dbg_wl("PEB %d EC %d protection over, move to used tree", 814 e->pnum, e->ec); 815 816 list_del(&e->u.list); 817 wl_tree_add(e, &ubi->used); 818 if (count++ > 32) { 819 /* 820 * Let's be nice and avoid holding the spinlock for 821 * too long. 822 */ 823 spin_unlock(&ubi->wl_lock); 824 cond_resched(); 825 goto repeat; 826 } 827 } 828 829 ubi->pq_head += 1; 830 if (ubi->pq_head == UBI_PROT_QUEUE_LEN) 831 ubi->pq_head = 0; 832 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN); 833 spin_unlock(&ubi->wl_lock); 834 } 835 836 /** 837 * __schedule_ubi_work - schedule a work. 838 * @ubi: UBI device description object 839 * @wrk: the work to schedule 840 * 841 * This function adds a work defined by @wrk to the tail of the pending works 842 * list. Can only be used if ubi->work_sem is already held in read mode! 843 */ 844 static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) 845 { 846 spin_lock(&ubi->wl_lock); 847 list_add_tail(&wrk->list, &ubi->works); 848 ubi_assert(ubi->works_count >= 0); 849 ubi->works_count += 1; 850 if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi)) 851 wake_up_process(ubi->bgt_thread); 852 spin_unlock(&ubi->wl_lock); 853 } 854 855 /** 856 * schedule_ubi_work - schedule a work. 857 * @ubi: UBI device description object 858 * @wrk: the work to schedule 859 * 860 * This function adds a work defined by @wrk to the tail of the pending works 861 * list. 
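 *
 * (Note on the erase-counter arithmetic in 'sync_erase()' above:
 * 'ubi_io_sync_erase()' returns the number of erase operations it actually
 * performed - more than one when torturing - which is why the new counter is
 * computed as
 *
 *	ec += err;
 *
 * and written back into the EC header before @e->ec and @ubi->max_ec are
 * updated.)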
862 */ 863 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) 864 { 865 down_read(&ubi->work_sem); 866 __schedule_ubi_work(ubi, wrk); 867 up_read(&ubi->work_sem); 868 } 869 870 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, 871 int shutdown); 872 873 #ifdef CONFIG_MTD_UBI_FASTMAP 874 /** 875 * ubi_is_erase_work - checks whether a work is erase work. 876 * @wrk: The work object to be checked 877 */ 878 int ubi_is_erase_work(struct ubi_work *wrk) 879 { 880 return wrk->func == erase_worker; 881 } 882 #endif 883 884 /** 885 * schedule_erase - schedule an erase work. 886 * @ubi: UBI device description object 887 * @e: the WL entry of the physical eraseblock to erase 888 * @vol_id: the volume ID that last used this PEB 889 * @lnum: the last used logical eraseblock number for the PEB 890 * @torture: if the physical eraseblock has to be tortured 891 * 892 * This function returns zero in case of success and a %-ENOMEM in case of 893 * failure. 894 */ 895 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, 896 int vol_id, int lnum, int torture) 897 { 898 struct ubi_work *wl_wrk; 899 900 ubi_assert(e); 901 ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); 902 903 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", 904 e->pnum, e->ec, torture); 905 906 wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); 907 if (!wl_wrk) 908 return -ENOMEM; 909 910 wl_wrk->func = &erase_worker; 911 wl_wrk->e = e; 912 wl_wrk->vol_id = vol_id; 913 wl_wrk->lnum = lnum; 914 wl_wrk->torture = torture; 915 916 schedule_ubi_work(ubi, wl_wrk); 917 return 0; 918 } 919 920 /** 921 * do_sync_erase - run the erase worker synchronously. 922 * @ubi: UBI device description object 923 * @e: the WL entry of the physical eraseblock to erase 924 * @vol_id: the volume ID that last used this PEB 925 * @lnum: the last used logical eraseblock number for the PEB 926 * @torture: if the physical eraseblock has to be tortured 927 * 928 */ 929 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, 930 int vol_id, int lnum, int torture) 931 { 932 struct ubi_work *wl_wrk; 933 934 dbg_wl("sync erase of PEB %i", e->pnum); 935 936 wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); 937 if (!wl_wrk) 938 return -ENOMEM; 939 940 wl_wrk->e = e; 941 wl_wrk->vol_id = vol_id; 942 wl_wrk->lnum = lnum; 943 wl_wrk->torture = torture; 944 945 return erase_worker(ubi, wl_wrk, 0); 946 } 947 948 #ifdef CONFIG_MTD_UBI_FASTMAP 949 /** 950 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling 951 * sub-system. 952 * see: ubi_wl_put_peb() 953 * 954 * @ubi: UBI device description object 955 * @fm_e: physical eraseblock to return 956 * @lnum: the last used logical eraseblock number for the PEB 957 * @torture: if this physical eraseblock has to be tortured 958 */ 959 int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e, 960 int lnum, int torture) 961 { 962 struct ubi_wl_entry *e; 963 int vol_id, pnum = fm_e->pnum; 964 965 dbg_wl("PEB %d", pnum); 966 967 ubi_assert(pnum >= 0); 968 ubi_assert(pnum < ubi->peb_count); 969 970 spin_lock(&ubi->wl_lock); 971 e = ubi->lookuptbl[pnum]; 972 973 /* This can happen if we recovered from a fastmap the very 974 * first time and writing now a new one. In this case the wl system 975 * has never seen any PEB used by the original fastmap. 
976 */ 977 if (!e) { 978 e = fm_e; 979 ubi_assert(e->ec >= 0); 980 ubi->lookuptbl[pnum] = e; 981 } else { 982 e->ec = fm_e->ec; 983 kfree(fm_e); 984 } 985 986 spin_unlock(&ubi->wl_lock); 987 988 vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID; 989 return schedule_erase(ubi, e, vol_id, lnum, torture); 990 } 991 #endif 992 993 /** 994 * wear_leveling_worker - wear-leveling worker function. 995 * @ubi: UBI device description object 996 * @wrk: the work object 997 * @shutdown: non-zero if the worker has to free memory and exit 998 * because the WL-subsystem is shutting down 999 * 1000 * This function copies a more worn out physical eraseblock to a less worn out 1001 * one. Returns zero in case of success and a negative error code in case of 1002 * failure. 1003 */ 1004 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, 1005 int shutdown) 1006 { 1007 int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; 1008 int vol_id = -1, uninitialized_var(lnum); 1009 #ifdef CONFIG_MTD_UBI_FASTMAP 1010 int anchor = wrk->anchor; 1011 #endif 1012 struct ubi_wl_entry *e1, *e2; 1013 struct ubi_vid_hdr *vid_hdr; 1014 1015 kfree(wrk); 1016 if (shutdown) 1017 return 0; 1018 1019 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 1020 if (!vid_hdr) 1021 return -ENOMEM; 1022 1023 mutex_lock(&ubi->move_mutex); 1024 spin_lock(&ubi->wl_lock); 1025 ubi_assert(!ubi->move_from && !ubi->move_to); 1026 ubi_assert(!ubi->move_to_put); 1027 1028 if (!ubi->free.rb_node || 1029 (!ubi->used.rb_node && !ubi->scrub.rb_node)) { 1030 /* 1031 * No free physical eraseblocks? Well, they must be waiting in 1032 * the queue to be erased. Cancel movement - it will be 1033 * triggered again when a free physical eraseblock appears. 1034 * 1035 * No used physical eraseblocks? They must be temporarily 1036 * protected from being moved. They will be moved to the 1037 * @ubi->used tree later and the wear-leveling will be 1038 * triggered again. 1039 */ 1040 dbg_wl("cancel WL, a list is empty: free %d, used %d", 1041 !ubi->free.rb_node, !ubi->used.rb_node); 1042 goto out_cancel; 1043 } 1044 1045 #ifdef CONFIG_MTD_UBI_FASTMAP 1046 /* Check whether we need to produce an anchor PEB */ 1047 if (!anchor) 1048 anchor = !anchor_pebs_avalible(&ubi->free); 1049 1050 if (anchor) { 1051 e1 = find_anchor_wl_entry(&ubi->used); 1052 if (!e1) 1053 goto out_cancel; 1054 e2 = get_peb_for_wl(ubi); 1055 if (!e2) 1056 goto out_cancel; 1057 1058 self_check_in_wl_tree(ubi, e1, &ubi->used); 1059 rb_erase(&e1->u.rb, &ubi->used); 1060 dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum); 1061 } else if (!ubi->scrub.rb_node) { 1062 #else 1063 if (!ubi->scrub.rb_node) { 1064 #endif 1065 /* 1066 * Now pick the least worn-out used physical eraseblock and a 1067 * highly worn-out free physical eraseblock. If the erase 1068 * counters differ much enough, start wear-leveling. 
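 *
 * (Spelled out: @e1 = rb_first(&ubi->used) is the least-worn used PEB
 * because the used tree is keyed by erase counter, @e2 comes from
 * 'get_peb_for_wl()' and is a highly-worn free PEB bounded by
 * %WL_FREE_MAX_DIFF, and the copy happens only if
 * e2->ec - e1->ec >= UBI_WL_THRESHOLD; otherwise @e2 is simply returned to
 * the free tree.)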
1069 */ 1070 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); 1071 e2 = get_peb_for_wl(ubi); 1072 if (!e2) 1073 goto out_cancel; 1074 1075 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { 1076 dbg_wl("no WL needed: min used EC %d, max free EC %d", 1077 e1->ec, e2->ec); 1078 1079 /* Give the unused PEB back */ 1080 wl_tree_add(e2, &ubi->free); 1081 ubi->free_count++; 1082 goto out_cancel; 1083 } 1084 self_check_in_wl_tree(ubi, e1, &ubi->used); 1085 rb_erase(&e1->u.rb, &ubi->used); 1086 dbg_wl("move PEB %d EC %d to PEB %d EC %d", 1087 e1->pnum, e1->ec, e2->pnum, e2->ec); 1088 } else { 1089 /* Perform scrubbing */ 1090 scrubbing = 1; 1091 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb); 1092 e2 = get_peb_for_wl(ubi); 1093 if (!e2) 1094 goto out_cancel; 1095 1096 self_check_in_wl_tree(ubi, e1, &ubi->scrub); 1097 rb_erase(&e1->u.rb, &ubi->scrub); 1098 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); 1099 } 1100 1101 ubi->move_from = e1; 1102 ubi->move_to = e2; 1103 spin_unlock(&ubi->wl_lock); 1104 1105 /* 1106 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum. 1107 * We so far do not know which logical eraseblock our physical 1108 * eraseblock (@e1) belongs to. We have to read the volume identifier 1109 * header first. 1110 * 1111 * Note, we are protected from this PEB being unmapped and erased. The 1112 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB 1113 * which is being moved was unmapped. 1114 */ 1115 1116 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); 1117 if (err && err != UBI_IO_BITFLIPS) { 1118 if (err == UBI_IO_FF) { 1119 /* 1120 * We are trying to move PEB without a VID header. UBI 1121 * always write VID headers shortly after the PEB was 1122 * given, so we have a situation when it has not yet 1123 * had a chance to write it, because it was preempted. 1124 * So add this PEB to the protection queue so far, 1125 * because presumably more data will be written there 1126 * (including the missing VID header), and then we'll 1127 * move it. 1128 */ 1129 dbg_wl("PEB %d has no VID header", e1->pnum); 1130 protect = 1; 1131 goto out_not_moved; 1132 } else if (err == UBI_IO_FF_BITFLIPS) { 1133 /* 1134 * The same situation as %UBI_IO_FF, but bit-flips were 1135 * detected. It is better to schedule this PEB for 1136 * scrubbing. 1137 */ 1138 dbg_wl("PEB %d has no VID header but has bit-flips", 1139 e1->pnum); 1140 scrubbing = 1; 1141 goto out_not_moved; 1142 } 1143 1144 ubi_err(ubi, "error %d while reading VID header from PEB %d", 1145 err, e1->pnum); 1146 goto out_error; 1147 } 1148 1149 vol_id = be32_to_cpu(vid_hdr->vol_id); 1150 lnum = be32_to_cpu(vid_hdr->lnum); 1151 1152 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); 1153 if (err) { 1154 if (err == MOVE_CANCEL_RACE) { 1155 /* 1156 * The LEB has not been moved because the volume is 1157 * being deleted or the PEB has been put meanwhile. We 1158 * should prevent this PEB from being selected for 1159 * wear-leveling movement again, so put it to the 1160 * protection queue. 1161 */ 1162 protect = 1; 1163 goto out_not_moved; 1164 } 1165 if (err == MOVE_RETRY) { 1166 scrubbing = 1; 1167 goto out_not_moved; 1168 } 1169 if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR || 1170 err == MOVE_TARGET_RD_ERR) { 1171 /* 1172 * Target PEB had bit-flips or write error - torture it. 1173 */ 1174 torture = 1; 1175 goto out_not_moved; 1176 } 1177 1178 if (err == MOVE_SOURCE_RD_ERR) { 1179 /* 1180 * An error happened while reading the source PEB. 
Do 1181 * not switch to R/O mode in this case, and give the 1182 * upper layers a possibility to recover from this, 1183 * e.g. by unmapping corresponding LEB. Instead, just 1184 * put this PEB to the @ubi->erroneous list to prevent 1185 * UBI from trying to move it over and over again. 1186 */ 1187 if (ubi->erroneous_peb_count > ubi->max_erroneous) { 1188 ubi_err(ubi, "too many erroneous eraseblocks (%d)", 1189 ubi->erroneous_peb_count); 1190 goto out_error; 1191 } 1192 erroneous = 1; 1193 goto out_not_moved; 1194 } 1195 1196 if (err < 0) 1197 goto out_error; 1198 1199 ubi_assert(0); 1200 } 1201 1202 /* The PEB has been successfully moved */ 1203 if (scrubbing) 1204 ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d", 1205 e1->pnum, vol_id, lnum, e2->pnum); 1206 ubi_free_vid_hdr(ubi, vid_hdr); 1207 1208 spin_lock(&ubi->wl_lock); 1209 if (!ubi->move_to_put) { 1210 wl_tree_add(e2, &ubi->used); 1211 e2 = NULL; 1212 } 1213 ubi->move_from = ubi->move_to = NULL; 1214 ubi->move_to_put = ubi->wl_scheduled = 0; 1215 spin_unlock(&ubi->wl_lock); 1216 1217 err = do_sync_erase(ubi, e1, vol_id, lnum, 0); 1218 if (err) { 1219 if (e2) 1220 kmem_cache_free(ubi_wl_entry_slab, e2); 1221 goto out_ro; 1222 } 1223 1224 if (e2) { 1225 /* 1226 * Well, the target PEB was put meanwhile, schedule it for 1227 * erasure. 1228 */ 1229 dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", 1230 e2->pnum, vol_id, lnum); 1231 err = do_sync_erase(ubi, e2, vol_id, lnum, 0); 1232 if (err) 1233 goto out_ro; 1234 } 1235 1236 dbg_wl("done"); 1237 mutex_unlock(&ubi->move_mutex); 1238 return 0; 1239 1240 /* 1241 * For some reasons the LEB was not moved, might be an error, might be 1242 * something else. @e1 was not changed, so return it back. @e2 might 1243 * have been changed, schedule it for erasure. 1244 */ 1245 out_not_moved: 1246 if (vol_id != -1) 1247 dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)", 1248 e1->pnum, vol_id, lnum, e2->pnum, err); 1249 else 1250 dbg_wl("cancel moving PEB %d to PEB %d (%d)", 1251 e1->pnum, e2->pnum, err); 1252 spin_lock(&ubi->wl_lock); 1253 if (protect) 1254 prot_queue_add(ubi, e1); 1255 else if (erroneous) { 1256 wl_tree_add(e1, &ubi->erroneous); 1257 ubi->erroneous_peb_count += 1; 1258 } else if (scrubbing) 1259 wl_tree_add(e1, &ubi->scrub); 1260 else 1261 wl_tree_add(e1, &ubi->used); 1262 ubi_assert(!ubi->move_to_put); 1263 ubi->move_from = ubi->move_to = NULL; 1264 ubi->wl_scheduled = 0; 1265 spin_unlock(&ubi->wl_lock); 1266 1267 ubi_free_vid_hdr(ubi, vid_hdr); 1268 err = do_sync_erase(ubi, e2, vol_id, lnum, torture); 1269 if (err) 1270 goto out_ro; 1271 1272 mutex_unlock(&ubi->move_mutex); 1273 return 0; 1274 1275 out_error: 1276 if (vol_id != -1) 1277 ubi_err(ubi, "error %d while moving PEB %d to PEB %d", 1278 err, e1->pnum, e2->pnum); 1279 else 1280 ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d", 1281 err, e1->pnum, vol_id, lnum, e2->pnum); 1282 spin_lock(&ubi->wl_lock); 1283 ubi->move_from = ubi->move_to = NULL; 1284 ubi->move_to_put = ubi->wl_scheduled = 0; 1285 spin_unlock(&ubi->wl_lock); 1286 1287 ubi_free_vid_hdr(ubi, vid_hdr); 1288 kmem_cache_free(ubi_wl_entry_slab, e1); 1289 kmem_cache_free(ubi_wl_entry_slab, e2); 1290 1291 out_ro: 1292 ubi_ro_mode(ubi); 1293 mutex_unlock(&ubi->move_mutex); 1294 ubi_assert(err != 0); 1295 return err < 0 ? 
err : -EIO; 1296 1297 out_cancel: 1298 ubi->wl_scheduled = 0; 1299 spin_unlock(&ubi->wl_lock); 1300 mutex_unlock(&ubi->move_mutex); 1301 ubi_free_vid_hdr(ubi, vid_hdr); 1302 return 0; 1303 } 1304 1305 /** 1306 * ensure_wear_leveling - schedule wear-leveling if it is needed. 1307 * @ubi: UBI device description object 1308 * @nested: set to non-zero if this function is called from UBI worker 1309 * 1310 * This function checks if it is time to start wear-leveling and schedules it 1311 * if yes. This function returns zero in case of success and a negative error 1312 * code in case of failure. 1313 */ 1314 static int ensure_wear_leveling(struct ubi_device *ubi, int nested) 1315 { 1316 int err = 0; 1317 struct ubi_wl_entry *e1; 1318 struct ubi_wl_entry *e2; 1319 struct ubi_work *wrk; 1320 1321 spin_lock(&ubi->wl_lock); 1322 if (ubi->wl_scheduled) 1323 /* Wear-leveling is already in the work queue */ 1324 goto out_unlock; 1325 1326 /* 1327 * If the ubi->scrub tree is not empty, scrubbing is needed, and the 1328 * the WL worker has to be scheduled anyway. 1329 */ 1330 if (!ubi->scrub.rb_node) { 1331 if (!ubi->used.rb_node || !ubi->free.rb_node) 1332 /* No physical eraseblocks - no deal */ 1333 goto out_unlock; 1334 1335 /* 1336 * We schedule wear-leveling only if the difference between the 1337 * lowest erase counter of used physical eraseblocks and a high 1338 * erase counter of free physical eraseblocks is greater than 1339 * %UBI_WL_THRESHOLD. 1340 */ 1341 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); 1342 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); 1343 1344 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) 1345 goto out_unlock; 1346 dbg_wl("schedule wear-leveling"); 1347 } else 1348 dbg_wl("schedule scrubbing"); 1349 1350 ubi->wl_scheduled = 1; 1351 spin_unlock(&ubi->wl_lock); 1352 1353 wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); 1354 if (!wrk) { 1355 err = -ENOMEM; 1356 goto out_cancel; 1357 } 1358 1359 wrk->anchor = 0; 1360 wrk->func = &wear_leveling_worker; 1361 if (nested) 1362 __schedule_ubi_work(ubi, wrk); 1363 else 1364 schedule_ubi_work(ubi, wrk); 1365 return err; 1366 1367 out_cancel: 1368 spin_lock(&ubi->wl_lock); 1369 ubi->wl_scheduled = 0; 1370 out_unlock: 1371 spin_unlock(&ubi->wl_lock); 1372 return err; 1373 } 1374 1375 #ifdef CONFIG_MTD_UBI_FASTMAP 1376 /** 1377 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB. 1378 * @ubi: UBI device description object 1379 */ 1380 int ubi_ensure_anchor_pebs(struct ubi_device *ubi) 1381 { 1382 struct ubi_work *wrk; 1383 1384 spin_lock(&ubi->wl_lock); 1385 if (ubi->wl_scheduled) { 1386 spin_unlock(&ubi->wl_lock); 1387 return 0; 1388 } 1389 ubi->wl_scheduled = 1; 1390 spin_unlock(&ubi->wl_lock); 1391 1392 wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); 1393 if (!wrk) { 1394 spin_lock(&ubi->wl_lock); 1395 ubi->wl_scheduled = 0; 1396 spin_unlock(&ubi->wl_lock); 1397 return -ENOMEM; 1398 } 1399 1400 wrk->anchor = 1; 1401 wrk->func = &wear_leveling_worker; 1402 schedule_ubi_work(ubi, wrk); 1403 return 0; 1404 } 1405 #endif 1406 1407 /** 1408 * erase_worker - physical eraseblock erase worker function. 1409 * @ubi: UBI device description object 1410 * @wl_wrk: the work object 1411 * @shutdown: non-zero if the worker has to free memory and exit 1412 * because the WL sub-system is shutting down 1413 * 1414 * This function erases a physical eraseblock and perform torture testing if 1415 * needed. It also takes care about marking the physical eraseblock bad if 1416 * needed. 
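 *
 * (For reference, the scheduling decision made by 'ensure_wear_leveling()'
 * above: the wear-leveling worker is queued either when @ubi->scrub is
 * non-empty, or when the most-worn eligible free PEB and the least-worn used
 * PEB differ in erase counter by at least %UBI_WL_THRESHOLD.)
 *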
Returns zero in case of success and a negative error code in case of 1417 * failure. 1418 */ 1419 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, 1420 int shutdown) 1421 { 1422 struct ubi_wl_entry *e = wl_wrk->e; 1423 int pnum = e->pnum; 1424 int vol_id = wl_wrk->vol_id; 1425 int lnum = wl_wrk->lnum; 1426 int err, available_consumed = 0; 1427 1428 if (shutdown) { 1429 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); 1430 kfree(wl_wrk); 1431 kmem_cache_free(ubi_wl_entry_slab, e); 1432 return 0; 1433 } 1434 1435 dbg_wl("erase PEB %d EC %d LEB %d:%d", 1436 pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum); 1437 1438 ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); 1439 1440 err = sync_erase(ubi, e, wl_wrk->torture); 1441 if (!err) { 1442 /* Fine, we've erased it successfully */ 1443 kfree(wl_wrk); 1444 1445 spin_lock(&ubi->wl_lock); 1446 wl_tree_add(e, &ubi->free); 1447 ubi->free_count++; 1448 spin_unlock(&ubi->wl_lock); 1449 1450 /* 1451 * One more erase operation has happened, take care about 1452 * protected physical eraseblocks. 1453 */ 1454 serve_prot_queue(ubi); 1455 1456 /* And take care about wear-leveling */ 1457 err = ensure_wear_leveling(ubi, 1); 1458 return err; 1459 } 1460 1461 ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err); 1462 kfree(wl_wrk); 1463 1464 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || 1465 err == -EBUSY) { 1466 int err1; 1467 1468 /* Re-schedule the LEB for erasure */ 1469 err1 = schedule_erase(ubi, e, vol_id, lnum, 0); 1470 if (err1) { 1471 err = err1; 1472 goto out_ro; 1473 } 1474 return err; 1475 } 1476 1477 kmem_cache_free(ubi_wl_entry_slab, e); 1478 if (err != -EIO) 1479 /* 1480 * If this is not %-EIO, we have no idea what to do. Scheduling 1481 * this physical eraseblock for erasure again would cause 1482 * errors again and again. Well, lets switch to R/O mode. 1483 */ 1484 goto out_ro; 1485 1486 /* It is %-EIO, the PEB went bad */ 1487 1488 if (!ubi->bad_allowed) { 1489 ubi_err(ubi, "bad physical eraseblock %d detected", pnum); 1490 goto out_ro; 1491 } 1492 1493 spin_lock(&ubi->volumes_lock); 1494 if (ubi->beb_rsvd_pebs == 0) { 1495 if (ubi->avail_pebs == 0) { 1496 spin_unlock(&ubi->volumes_lock); 1497 ubi_err(ubi, "no reserved/available physical eraseblocks"); 1498 goto out_ro; 1499 } 1500 ubi->avail_pebs -= 1; 1501 available_consumed = 1; 1502 } 1503 spin_unlock(&ubi->volumes_lock); 1504 1505 ubi_msg(ubi, "mark PEB %d as bad", pnum); 1506 err = ubi_io_mark_bad(ubi, pnum); 1507 if (err) 1508 goto out_ro; 1509 1510 spin_lock(&ubi->volumes_lock); 1511 if (ubi->beb_rsvd_pebs > 0) { 1512 if (available_consumed) { 1513 /* 1514 * The amount of reserved PEBs increased since we last 1515 * checked. 1516 */ 1517 ubi->avail_pebs += 1; 1518 available_consumed = 0; 1519 } 1520 ubi->beb_rsvd_pebs -= 1; 1521 } 1522 ubi->bad_peb_count += 1; 1523 ubi->good_peb_count -= 1; 1524 ubi_calculate_reserved(ubi); 1525 if (available_consumed) 1526 ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB"); 1527 else if (ubi->beb_rsvd_pebs) 1528 ubi_msg(ubi, "%d PEBs left in the reserve", 1529 ubi->beb_rsvd_pebs); 1530 else 1531 ubi_warn(ubi, "last PEB from the reserve was used"); 1532 spin_unlock(&ubi->volumes_lock); 1533 1534 return err; 1535 1536 out_ro: 1537 if (available_consumed) { 1538 spin_lock(&ubi->volumes_lock); 1539 ubi->avail_pebs += 1; 1540 spin_unlock(&ubi->volumes_lock); 1541 } 1542 ubi_ro_mode(ubi); 1543 return err; 1544 } 1545 1546 /** 1547 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system. 
1548 * @ubi: UBI device description object 1549 * @vol_id: the volume ID that last used this PEB 1550 * @lnum: the last used logical eraseblock number for the PEB 1551 * @pnum: physical eraseblock to return 1552 * @torture: if this physical eraseblock has to be tortured 1553 * 1554 * This function is called to return physical eraseblock @pnum to the pool of 1555 * free physical eraseblocks. The @torture flag has to be set if an I/O error 1556 * occurred to this @pnum and it has to be tested. This function returns zero 1557 * in case of success, and a negative error code in case of failure. 1558 */ 1559 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, 1560 int pnum, int torture) 1561 { 1562 int err; 1563 struct ubi_wl_entry *e; 1564 1565 dbg_wl("PEB %d", pnum); 1566 ubi_assert(pnum >= 0); 1567 ubi_assert(pnum < ubi->peb_count); 1568 1569 retry: 1570 spin_lock(&ubi->wl_lock); 1571 e = ubi->lookuptbl[pnum]; 1572 if (e == ubi->move_from) { 1573 /* 1574 * User is putting the physical eraseblock which was selected to 1575 * be moved. It will be scheduled for erasure in the 1576 * wear-leveling worker. 1577 */ 1578 dbg_wl("PEB %d is being moved, wait", pnum); 1579 spin_unlock(&ubi->wl_lock); 1580 1581 /* Wait for the WL worker by taking the @ubi->move_mutex */ 1582 mutex_lock(&ubi->move_mutex); 1583 mutex_unlock(&ubi->move_mutex); 1584 goto retry; 1585 } else if (e == ubi->move_to) { 1586 /* 1587 * User is putting the physical eraseblock which was selected 1588 * as the target the data is moved to. It may happen if the EBA 1589 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()' 1590 * but the WL sub-system has not put the PEB to the "used" tree 1591 * yet, but it is about to do this. So we just set a flag which 1592 * will tell the WL worker that the PEB is not needed anymore 1593 * and should be scheduled for erasure. 1594 */ 1595 dbg_wl("PEB %d is the target of data moving", pnum); 1596 ubi_assert(!ubi->move_to_put); 1597 ubi->move_to_put = 1; 1598 spin_unlock(&ubi->wl_lock); 1599 return 0; 1600 } else { 1601 if (in_wl_tree(e, &ubi->used)) { 1602 self_check_in_wl_tree(ubi, e, &ubi->used); 1603 rb_erase(&e->u.rb, &ubi->used); 1604 } else if (in_wl_tree(e, &ubi->scrub)) { 1605 self_check_in_wl_tree(ubi, e, &ubi->scrub); 1606 rb_erase(&e->u.rb, &ubi->scrub); 1607 } else if (in_wl_tree(e, &ubi->erroneous)) { 1608 self_check_in_wl_tree(ubi, e, &ubi->erroneous); 1609 rb_erase(&e->u.rb, &ubi->erroneous); 1610 ubi->erroneous_peb_count -= 1; 1611 ubi_assert(ubi->erroneous_peb_count >= 0); 1612 /* Erroneous PEBs should be tortured */ 1613 torture = 1; 1614 } else { 1615 err = prot_queue_del(ubi, e->pnum); 1616 if (err) { 1617 ubi_err(ubi, "PEB %d not found", pnum); 1618 ubi_ro_mode(ubi); 1619 spin_unlock(&ubi->wl_lock); 1620 return err; 1621 } 1622 } 1623 } 1624 spin_unlock(&ubi->wl_lock); 1625 1626 err = schedule_erase(ubi, e, vol_id, lnum, torture); 1627 if (err) { 1628 spin_lock(&ubi->wl_lock); 1629 wl_tree_add(e, &ubi->used); 1630 spin_unlock(&ubi->wl_lock); 1631 } 1632 1633 return err; 1634 } 1635 1636 /** 1637 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing. 1638 * @ubi: UBI device description object 1639 * @pnum: the physical eraseblock to schedule 1640 * 1641 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock 1642 * needs scrubbing. This function schedules a physical eraseblock for 1643 * scrubbing which is done in background. This function returns zero in case of 1644 * success and a negative error code in case of failure. 
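 *
 * (Race handling in 'ubi_wl_put_peb()' above, in short: if the PEB being put
 * is the current wear-leveling source (@ubi->move_from), the caller waits for
 * the move to finish by taking and releasing @ubi->move_mutex and then
 * retries; if it is the move target (@ubi->move_to), only the @move_to_put
 * flag is set and the WL worker schedules the erasure itself.)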
1645 */ 1646 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) 1647 { 1648 struct ubi_wl_entry *e; 1649 1650 ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum); 1651 1652 retry: 1653 spin_lock(&ubi->wl_lock); 1654 e = ubi->lookuptbl[pnum]; 1655 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) || 1656 in_wl_tree(e, &ubi->erroneous)) { 1657 spin_unlock(&ubi->wl_lock); 1658 return 0; 1659 } 1660 1661 if (e == ubi->move_to) { 1662 /* 1663 * This physical eraseblock was used to move data to. The data 1664 * was moved but the PEB was not yet inserted to the proper 1665 * tree. We should just wait a little and let the WL worker 1666 * proceed. 1667 */ 1668 spin_unlock(&ubi->wl_lock); 1669 dbg_wl("the PEB %d is not in proper tree, retry", pnum); 1670 yield(); 1671 goto retry; 1672 } 1673 1674 if (in_wl_tree(e, &ubi->used)) { 1675 self_check_in_wl_tree(ubi, e, &ubi->used); 1676 rb_erase(&e->u.rb, &ubi->used); 1677 } else { 1678 int err; 1679 1680 err = prot_queue_del(ubi, e->pnum); 1681 if (err) { 1682 ubi_err(ubi, "PEB %d not found", pnum); 1683 ubi_ro_mode(ubi); 1684 spin_unlock(&ubi->wl_lock); 1685 return err; 1686 } 1687 } 1688 1689 wl_tree_add(e, &ubi->scrub); 1690 spin_unlock(&ubi->wl_lock); 1691 1692 /* 1693 * Technically scrubbing is the same as wear-leveling, so it is done 1694 * by the WL worker. 1695 */ 1696 return ensure_wear_leveling(ubi, 0); 1697 } 1698 1699 /** 1700 * ubi_wl_flush - flush all pending works. 1701 * @ubi: UBI device description object 1702 * @vol_id: the volume id to flush for 1703 * @lnum: the logical eraseblock number to flush for 1704 * 1705 * This function executes all pending works for a particular volume id / 1706 * logical eraseblock number pair. If either value is set to %UBI_ALL, then it 1707 * acts as a wildcard for all of the corresponding volume numbers or logical 1708 * eraseblock numbers. It returns zero in case of success and a negative error 1709 * code in case of failure. 1710 */ 1711 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) 1712 { 1713 int err = 0; 1714 int found = 1; 1715 1716 /* 1717 * Erase while the pending works queue is not empty, but not more than 1718 * the number of currently pending works. 1719 */ 1720 dbg_wl("flush pending work for LEB %d:%d (%d pending works)", 1721 vol_id, lnum, ubi->works_count); 1722 1723 while (found) { 1724 struct ubi_work *wrk, *tmp; 1725 found = 0; 1726 1727 down_read(&ubi->work_sem); 1728 spin_lock(&ubi->wl_lock); 1729 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) { 1730 if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) && 1731 (lnum == UBI_ALL || wrk->lnum == lnum)) { 1732 list_del(&wrk->list); 1733 ubi->works_count -= 1; 1734 ubi_assert(ubi->works_count >= 0); 1735 spin_unlock(&ubi->wl_lock); 1736 1737 err = wrk->func(ubi, wrk, 0); 1738 if (err) { 1739 up_read(&ubi->work_sem); 1740 return err; 1741 } 1742 1743 spin_lock(&ubi->wl_lock); 1744 found = 1; 1745 break; 1746 } 1747 } 1748 spin_unlock(&ubi->wl_lock); 1749 up_read(&ubi->work_sem); 1750 } 1751 1752 /* 1753 * Make sure all the works which have been done in parallel are 1754 * finished. 1755 */ 1756 down_write(&ubi->work_sem); 1757 up_write(&ubi->work_sem); 1758 1759 return err; 1760 } 1761 1762 /** 1763 * tree_destroy - destroy an RB-tree. 
1764 * @root: the root of the tree to destroy 1765 */ 1766 static void tree_destroy(struct rb_root *root) 1767 { 1768 struct rb_node *rb; 1769 struct ubi_wl_entry *e; 1770 1771 rb = root->rb_node; 1772 while (rb) { 1773 if (rb->rb_left) 1774 rb = rb->rb_left; 1775 else if (rb->rb_right) 1776 rb = rb->rb_right; 1777 else { 1778 e = rb_entry(rb, struct ubi_wl_entry, u.rb); 1779 1780 rb = rb_parent(rb); 1781 if (rb) { 1782 if (rb->rb_left == &e->u.rb) 1783 rb->rb_left = NULL; 1784 else 1785 rb->rb_right = NULL; 1786 } 1787 1788 kmem_cache_free(ubi_wl_entry_slab, e); 1789 } 1790 } 1791 } 1792 1793 /** 1794 * ubi_thread - UBI background thread. 1795 * @u: the UBI device description object pointer 1796 */ 1797 int ubi_thread(void *u) 1798 { 1799 int failures = 0; 1800 struct ubi_device *ubi = u; 1801 1802 ubi_msg(ubi, "background thread \"%s\" started, PID %d", 1803 ubi->bgt_name, task_pid_nr(current)); 1804 1805 set_freezable(); 1806 for (;;) { 1807 int err; 1808 1809 if (kthread_should_stop()) { 1810 ubi_msg(ubi, "background thread \"%s\" should stop, PID %d", 1811 ubi->bgt_name, task_pid_nr(current)); 1812 break; 1813 } 1814 1815 if (try_to_freeze()) 1816 continue; 1817 1818 spin_lock(&ubi->wl_lock); 1819 if (list_empty(&ubi->works) || ubi->ro_mode || 1820 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) { 1821 set_current_state(TASK_INTERRUPTIBLE); 1822 spin_unlock(&ubi->wl_lock); 1823 schedule(); 1824 continue; 1825 } 1826 spin_unlock(&ubi->wl_lock); 1827 1828 err = do_work(ubi); 1829 if (err) { 1830 ubi_err(ubi, "%s: work failed with error code %d", 1831 ubi->bgt_name, err); 1832 if (failures++ > WL_MAX_FAILURES) { 1833 /* 1834 * Too many failures, disable the thread and 1835 * switch to read-only mode. 1836 */ 1837 ubi_msg(ubi, "%s: %d consecutive failures", 1838 ubi->bgt_name, WL_MAX_FAILURES); 1839 ubi_ro_mode(ubi); 1840 ubi->thread_enabled = 0; 1841 continue; 1842 } 1843 } else 1844 failures = 0; 1845 1846 cond_resched(); 1847 } 1848 1849 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name); 1850 return 0; 1851 } 1852 1853 /** 1854 * shutdown_work - shutdown all pending works. 1855 * @ubi: UBI device description object 1856 */ 1857 static void shutdown_work(struct ubi_device *ubi) 1858 { 1859 while (!list_empty(&ubi->works)) { 1860 struct ubi_work *wrk; 1861 1862 wrk = list_entry(ubi->works.next, struct ubi_work, list); 1863 list_del(&wrk->list); 1864 wrk->func(ubi, wrk, 1); 1865 ubi->works_count -= 1; 1866 ubi_assert(ubi->works_count >= 0); 1867 } 1868 } 1869 1870 /** 1871 * ubi_wl_init - initialize the WL sub-system using attaching information. 1872 * @ubi: UBI device description object 1873 * @ai: attaching information 1874 * 1875 * This function returns zero in case of success, and a negative error code in 1876 * case of failure. 
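 *
 * (Implementation note on 'tree_destroy()' above: it tears the tree down
 * without recursion by repeatedly descending to a leaf, detaching that leaf
 * from its parent and freeing it, so even large trees are destroyed with
 * constant stack usage.)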
1877 */ 1878 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) 1879 { 1880 int err, i, reserved_pebs, found_pebs = 0; 1881 struct rb_node *rb1, *rb2; 1882 struct ubi_ainf_volume *av; 1883 struct ubi_ainf_peb *aeb, *tmp; 1884 struct ubi_wl_entry *e; 1885 1886 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT; 1887 spin_lock_init(&ubi->wl_lock); 1888 mutex_init(&ubi->move_mutex); 1889 init_rwsem(&ubi->work_sem); 1890 ubi->max_ec = ai->max_ec; 1891 INIT_LIST_HEAD(&ubi->works); 1892 #ifdef CONFIG_MTD_UBI_FASTMAP 1893 INIT_WORK(&ubi->fm_work, update_fastmap_work_fn); 1894 #endif 1895 1896 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); 1897 1898 err = -ENOMEM; 1899 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL); 1900 if (!ubi->lookuptbl) 1901 return err; 1902 1903 for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) 1904 INIT_LIST_HEAD(&ubi->pq[i]); 1905 ubi->pq_head = 0; 1906 1907 list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) { 1908 cond_resched(); 1909 1910 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); 1911 if (!e) 1912 goto out_free; 1913 1914 e->pnum = aeb->pnum; 1915 e->ec = aeb->ec; 1916 ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); 1917 ubi->lookuptbl[e->pnum] = e; 1918 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) { 1919 kmem_cache_free(ubi_wl_entry_slab, e); 1920 goto out_free; 1921 } 1922 1923 found_pebs++; 1924 } 1925 1926 ubi->free_count = 0; 1927 list_for_each_entry(aeb, &ai->free, u.list) { 1928 cond_resched(); 1929 1930 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); 1931 if (!e) 1932 goto out_free; 1933 1934 e->pnum = aeb->pnum; 1935 e->ec = aeb->ec; 1936 ubi_assert(e->ec >= 0); 1937 ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); 1938 1939 wl_tree_add(e, &ubi->free); 1940 ubi->free_count++; 1941 1942 ubi->lookuptbl[e->pnum] = e; 1943 1944 found_pebs++; 1945 } 1946 1947 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { 1948 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { 1949 cond_resched(); 1950 1951 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); 1952 if (!e) 1953 goto out_free; 1954 1955 e->pnum = aeb->pnum; 1956 e->ec = aeb->ec; 1957 ubi->lookuptbl[e->pnum] = e; 1958 1959 if (!aeb->scrub) { 1960 dbg_wl("add PEB %d EC %d to the used tree", 1961 e->pnum, e->ec); 1962 wl_tree_add(e, &ubi->used); 1963 } else { 1964 dbg_wl("add PEB %d EC %d to the scrub tree", 1965 e->pnum, e->ec); 1966 wl_tree_add(e, &ubi->scrub); 1967 } 1968 1969 found_pebs++; 1970 } 1971 } 1972 1973 dbg_wl("found %i PEBs", found_pebs); 1974 1975 if (ubi->fm) 1976 ubi_assert(ubi->good_peb_count == \ 1977 found_pebs + ubi->fm->used_blocks); 1978 else 1979 ubi_assert(ubi->good_peb_count == found_pebs); 1980 1981 reserved_pebs = WL_RESERVED_PEBS; 1982 #ifdef CONFIG_MTD_UBI_FASTMAP 1983 /* Reserve enough LEBs to store two fastmaps. 
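 * For example (illustrative numbers only): if the fastmap occupies two LEBs,
 * i.e. ubi->fm_size / ubi->leb_size == 2, the line below reserves
 * 2 * 2 = 4 additional PEBs on top of %WL_RESERVED_PEBS, enough to keep both
 * the current and the next fastmap on flash.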
*/ 1984 reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2; 1985 #endif 1986 1987 if (ubi->avail_pebs < reserved_pebs) { 1988 ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)", 1989 ubi->avail_pebs, reserved_pebs); 1990 if (ubi->corr_peb_count) 1991 ubi_err(ubi, "%d PEBs are corrupted and not used", 1992 ubi->corr_peb_count); 1993 goto out_free; 1994 } 1995 ubi->avail_pebs -= reserved_pebs; 1996 ubi->rsvd_pebs += reserved_pebs; 1997 1998 /* Schedule wear-leveling if needed */ 1999 err = ensure_wear_leveling(ubi, 0); 2000 if (err) 2001 goto out_free; 2002 2003 return 0; 2004 2005 out_free: 2006 shutdown_work(ubi); 2007 tree_destroy(&ubi->used); 2008 tree_destroy(&ubi->free); 2009 tree_destroy(&ubi->scrub); 2010 kfree(ubi->lookuptbl); 2011 return err; 2012 } 2013 2014 /** 2015 * protection_queue_destroy - destroy the protection queue. 2016 * @ubi: UBI device description object 2017 */ 2018 static void protection_queue_destroy(struct ubi_device *ubi) 2019 { 2020 int i; 2021 struct ubi_wl_entry *e, *tmp; 2022 2023 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) { 2024 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) { 2025 list_del(&e->u.list); 2026 kmem_cache_free(ubi_wl_entry_slab, e); 2027 } 2028 } 2029 } 2030 2031 /** 2032 * ubi_wl_close - close the wear-leveling sub-system. 2033 * @ubi: UBI device description object 2034 */ 2035 void ubi_wl_close(struct ubi_device *ubi) 2036 { 2037 dbg_wl("close the WL sub-system"); 2038 shutdown_work(ubi); 2039 protection_queue_destroy(ubi); 2040 tree_destroy(&ubi->used); 2041 tree_destroy(&ubi->erroneous); 2042 tree_destroy(&ubi->free); 2043 tree_destroy(&ubi->scrub); 2044 kfree(ubi->lookuptbl); 2045 } 2046 2047 /** 2048 * self_check_ec - make sure that the erase counter of a PEB is correct. 2049 * @ubi: UBI device description object 2050 * @pnum: the physical eraseblock number to check 2051 * @ec: the erase counter to check 2052 * 2053 * This function returns zero if the erase counter of physical eraseblock @pnum 2054 * is equivalent to @ec, and a negative error code if not or if an error 2055 * occurred. 2056 */ 2057 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec) 2058 { 2059 int err; 2060 long long read_ec; 2061 struct ubi_ec_hdr *ec_hdr; 2062 2063 if (!ubi_dbg_chk_gen(ubi)) 2064 return 0; 2065 2066 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); 2067 if (!ec_hdr) 2068 return -ENOMEM; 2069 2070 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0); 2071 if (err && err != UBI_IO_BITFLIPS) { 2072 /* The header does not have to exist */ 2073 err = 0; 2074 goto out_free; 2075 } 2076 2077 read_ec = be64_to_cpu(ec_hdr->ec); 2078 if (ec != read_ec && read_ec - ec > 1) { 2079 ubi_err(ubi, "self-check failed for PEB %d", pnum); 2080 ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec); 2081 dump_stack(); 2082 err = 1; 2083 } else 2084 err = 0; 2085 2086 out_free: 2087 kfree(ec_hdr); 2088 return err; 2089 } 2090 2091 /** 2092 * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree. 2093 * @ubi: UBI device description object 2094 * @e: the wear-leveling entry to check 2095 * @root: the root of the tree 2096 * 2097 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it 2098 * is not. 
 */
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root)
{
	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	if (in_wl_tree(e, root))
		return 0;

	ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
		e->pnum, e->ec, root);
	dump_stack();
	return -EINVAL;
}

/**
 * self_check_in_pq - check if wear-leveling entry is in the protection
 * queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
 */
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e)
{
	struct ubi_wl_entry *p;
	int i;

	if (!ubi_dbg_chk_gen(ubi))
		return 0;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
		list_for_each_entry(p, &ubi->pq[i], u.list)
			if (p == e)
				return 0;

	ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
		e->pnum, e->ec);
	dump_stack();
	return -EINVAL;
}