/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling unit.
 *
 * This unit is responsible for wear-leveling. It works in terms of physical
 * eraseblocks and erase counters and knows nothing about logical eraseblocks,
 * volumes, etc. From this unit's perspective all physical eraseblocks are of
 * two types - used and free. Used physical eraseblocks are those that were
 * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are
 * those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only 0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL unit by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL unit.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counters to free physical eraseblocks
 * with high erase counters.
 *
 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
 * an "optimal" physical eraseblock. For example, when it is known that the
 * physical eraseblock will be "put" soon because it contains short-term data,
 * the WL unit may pick a free physical eraseblock with a low erase counter,
 * and so forth.
 *
 * If the WL unit fails to erase a physical eraseblock, it marks it as bad.
 *
 * This unit is also responsible for scrubbing. If a bit-flip is detected in a
 * physical eraseblock, it has to be moved. Technically this is the same as
 * moving it for wear-leveling reasons.
 *
 * As was said, for the WL unit all physical eraseblocks are either "free" or
 * "used". Free eraseblocks are kept in the @wl->free RB-tree, while used
 * eraseblocks are kept in a set of different RB-trees: @wl->used,
 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
 *
 * Note, in this implementation, we keep a small in-RAM object for each
 * physical eraseblock. This is surely not a scalable solution. But it appears
 * to be good enough for moderately large flashes and it is simple. In the
 * future, one may re-work this unit and make it more scalable.
 *
 * At the moment this unit does not utilize the sequence number, which was
 * introduced relatively recently. But it would be wise to do so because the
 * sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with a low erase counter and we need to pick
 * the target PEB, we could pick a PEB with the highest EC if our PEB is "old",
 * and a target PEB with an average EC if our PEB is not very "old". This is
 * room for future re-works of the WL unit.
 *
 * FIXME: looks too complex, should be simplified (later).
 */

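/*
 * Schematic life cycle of a physical eraseblock, summarizing the description
 * above (a simplification, not an exhaustive state machine):
 *
 *   @wl->free --'ubi_wl_get_peb()'--> @wl->prot.* --protection over-->
 *   @wl->used or @wl->scrub --'ubi_wl_put_peb()' or WL move--> erase queue
 *   --'erase_worker()'--> @wl->free
 */
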
#ifdef UBI_LINUX
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#endif

#include <ubi_uboot.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * For how many erase cycles short term, unknown, and long term physical
 * eraseblocks are protected.
 */
#define ST_PROTECTION 16
#define U_PROTECTION 10
#define LT_PROTECTION 4

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL unit starts moving data from used physical eraseblocks
 * with a low erase counter to free physical eraseblocks with a high erase
 * counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL unit has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could
 * lead to unlimited wear of one or a few physical eraseblocks. Indeed,
 * imagine a situation when the picked physical eraseblock is constantly
 * erased after the data is written to it. So, we have a constant which limits
 * the highest erase counter of the free physical eraseblock to pick. Namely,
 * the WL unit does not pick eraseblocks with erase counters greater than the
 * lowest erase counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)

/*
 * Maximum number of consecutive background thread failures which is enough
 * to switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

/**
 * struct ubi_wl_prot_entry - PEB protection entry.
 * @rb_pnum: link in the @wl->prot.pnum RB-tree
 * @rb_aec: link in the @wl->prot.aec RB-tree
 * @abs_ec: the absolute erase counter value when the protection ends
 * @e: the wear-leveling entry of the physical eraseblock under protection
 *
 * When the WL unit returns a physical eraseblock, the physical eraseblock is
 * protected from being moved for some "time". For this reason, the physical
 * eraseblock is not directly moved from the @wl->free tree to the @wl->used
 * tree. There is one more tree in between where this physical eraseblock is
 * temporarily stored (@wl->prot).
 *
 * All this protection stuff is needed because:
 * o we don't want to move physical eraseblocks just after we have given them
 *   to the user; instead, we first want to let users fill them up with data;
 *
 * o there is a chance that the user will put the physical eraseblock very
 *   soon, so it makes sense not to move it for some time, but wait; this is
 *   especially important in case of "short term" physical eraseblocks.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with the help
 * of the absolute erase counter (@wl->abs_ec). When it reaches a certain
 * value, the physical eraseblocks are moved from the protection trees
 * (@wl->prot.*) to the @wl->used tree.
 *
 * Protected physical eraseblocks are searched by physical eraseblock number
 * (when they are put) and by the absolute erase counter (to check if it is
 * time to move them to the @wl->used tree). So there are actually 2 RB-trees
 * storing the protected physical eraseblocks: @wl->prot.pnum and
 * @wl->prot.aec. They are referred to as the "protection" trees. The first
 * one is indexed by the physical eraseblock number. The second one is indexed
 * by the absolute erase counter. Both trees store
 * &struct ubi_wl_prot_entry objects.
 *
 * Each physical eraseblock has 2 main states: free and used. The former state
 * corresponds to the @wl->free tree. The latter state is split up into
 * several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is temporarily prohibited (@wl->prot.pnum and
 *   @wl->prot.aec trees);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those trees.
 */
struct ubi_wl_prot_entry {
	struct rb_node rb_pnum;
	struct rb_node rb_aec;
	unsigned long long abs_ec;
	struct ubi_wl_entry *e;
};

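/*
 * Worked example of the protection window (illustrative numbers): if
 * @ubi->abs_ec is 100 when a short-term PEB is handed out, 'prot_tree_add()'
 * stores abs_ec = 100 + %ST_PROTECTION = 116 in the protection entry. Once 16
 * more erase operations have completed anywhere on the device, so that the
 * absolute erase counter has reached 116, 'check_protection_over()' moves the
 * entry to the @wl->used tree and the PEB becomes eligible for wear-leveling.
 */
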
/**
 * struct ubi_work - UBI work description data structure.
 * @list: a link in the list of pending works
 * @func: worker function
 * @e: physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * The @func pointer points to the worker function. If the @cancel argument is
 * not zero, the worker has to free the resources and exit immediately. The
 * worker has to return zero in case of success and a negative error code in
 * case of failure.
 */
struct ubi_work {
	struct list_head list;
	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
	/* The below fields are only relevant to erasure works */
	struct ubi_wl_entry *e;
	int torture;
};

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root);
#else
#define paranoid_check_ec(ubi, pnum, ec) 0
#define paranoid_check_in_wl_tree(e, root)
#endif

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->rb, parent, p);
	rb_insert_color(&e->rb, root);
}

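/*
 * Note, because the WL trees are keyed by (erase counter, PEB number),
 * rb_first() always yields the least worn entry of a tree and rb_last() the
 * most worn one. 'find_wl_entry()' and 'ubi_wl_get_peb()' below rely on this
 * ordering.
 */
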
/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing works at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes it in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err("work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	spin_lock(&ubi->wl_lock);
	while (!ubi->free.rb_node) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);
		if (err)
			return err;

		spin_lock(&ubi->wl_lock);
	}
	spin_unlock(&ubi->wl_lock);

	return 0;
}

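/*
 * Note, @wl_lock is dropped before each 'do_work()' call above because the
 * worker functions may sleep (e.g., while erasing flash) and take @wl_lock
 * themselves, so invoking them under the spinlock would be a bug.
 */
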
/**
 * in_wl_tree - check if a wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * prot_tree_add - add a physical eraseblock to the protection trees.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 * @pe: protection entry object to use
 * @abs_ec: for how many erase cycles this physical eraseblock has to stay
 * protected (added to the current absolute erase counter to get the moment
 * when it has to be removed from the protection trees)
 *
 * @wl->lock has to be locked.
 */
static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  struct ubi_wl_prot_entry *pe, int abs_ec)
{
	struct rb_node **p, *parent = NULL;
	struct ubi_wl_prot_entry *pe1;

	pe->e = e;
	pe->abs_ec = ubi->abs_ec + abs_ec;

	p = &ubi->prot.pnum.rb_node;
	while (*p) {
		parent = *p;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);

		if (e->pnum < pe1->e->pnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&pe->rb_pnum, parent, p);
	rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);

	p = &ubi->prot.aec.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec < pe1->abs_ec)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&pe->rb_aec, parent, p);
	rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
}

/**
 * find_wl_entry - find a wear-leveling entry closest to a certain erase counter.
 * @root: the RB-tree where to look for
 * @max: highest allowed erase counter, relative to the lowest one in the tree
 *
 * This function looks for the wear-leveling entry with the highest erase
 * counter which is still less than the lowest erase counter in @root plus
 * @max.
 */
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
	max += e->ec;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}

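/*
 * Worked example for 'find_wl_entry()' (illustrative numbers): with free-tree
 * erase counters {10, 12, 500} and @max = %WL_FREE_MAX_DIFF, the bound is
 * 10 + 2 * %UBI_WL_THRESHOLD. If the threshold is large enough, the EC 500
 * entry is returned; otherwise the search stops at EC 12, so a badly worn
 * eraseblock is never handed out merely because it is the most worn one.
 */
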
/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 * @dtype: type of data which will be stored in this physical eraseblock
 *
 * This function returns a physical eraseblock number in case of success and a
 * negative error code in case of failure. Might sleep.
 */
int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
{
	int err, protect, medium_ec;
	struct ubi_wl_entry *e, *first, *last;
	struct ubi_wl_prot_entry *pe;

	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
		   dtype == UBI_UNKNOWN);

	pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
	if (!pe)
		return -ENOMEM;

retry:
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_assert(list_empty(&ubi->works));
			ubi_err("no free eraseblocks");
			spin_unlock(&ubi->wl_lock);
			kfree(pe);
			return -ENOSPC;
		}
		spin_unlock(&ubi->wl_lock);

		err = produce_free_peb(ubi);
		if (err < 0) {
			kfree(pe);
			return err;
		}
		goto retry;
	}

	switch (dtype) {
	case UBI_LONGTERM:
		/*
		 * For long term data we pick a physical eraseblock with a
		 * high erase counter. But the highest erase counter we can
		 * pick is bounded by the lowest erase counter plus
		 * %WL_FREE_MAX_DIFF.
		 */
		e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		protect = LT_PROTECTION;
		break;
	case UBI_UNKNOWN:
		/*
		 * For unknown data we pick a physical eraseblock with a
		 * medium erase counter. But we by no means can pick a
		 * physical eraseblock with an erase counter greater than or
		 * equal to the lowest erase counter plus %WL_FREE_MAX_DIFF.
		 */
		first = rb_entry(rb_first(&ubi->free),
				 struct ubi_wl_entry, rb);
		last = rb_entry(rb_last(&ubi->free),
				struct ubi_wl_entry, rb);

		if (last->ec - first->ec < WL_FREE_MAX_DIFF)
			e = rb_entry(ubi->free.rb_node,
				     struct ubi_wl_entry, rb);
		else {
			medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
			e = find_wl_entry(&ubi->free, medium_ec);
		}
		protect = U_PROTECTION;
		break;
	case UBI_SHORTTERM:
		/*
		 * For short term data we pick a physical eraseblock with the
		 * lowest erase counter as we expect it will be erased soon.
		 */
		e = rb_entry(rb_first(&ubi->free),
			     struct ubi_wl_entry, rb);
		protect = ST_PROTECTION;
		break;
	default:
		protect = 0;
		e = NULL;
		BUG();
	}

	/*
	 * Move the physical eraseblock to the protection trees where it will
	 * be protected from being moved for some time.
	 */
	paranoid_check_in_wl_tree(e, &ubi->free);
	rb_erase(&e->rb, &ubi->free);
	prot_tree_add(ubi, e, pe, protect);

	dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
	spin_unlock(&ubi->wl_lock);

	return e->pnum;
}

/**
 * prot_tree_del - remove a physical eraseblock from the protection trees.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function removes PEB @pnum from the protection trees and returns zero
 * in case of success and %-ENODEV if the PEB was not found in the protection
 * trees.
 */
static int prot_tree_del(struct ubi_device *ubi, int pnum)
{
	struct rb_node *p;
	struct ubi_wl_prot_entry *pe = NULL;

	p = ubi->prot.pnum.rb_node;
	while (p) {

		pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);

		if (pnum == pe->e->pnum)
			goto found;

		if (pnum < pe->e->pnum)
			p = p->rb_left;
		else
			p = p->rb_right;
	}

	return -ENODEV;

found:
	ubi_assert(pe->e->pnum == pnum);
	rb_erase(&pe->rb_aec, &ubi->prot.aec);
	rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
	kfree(pe);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = paranoid_check_ec(ubi, e->pnum, e->ec);
	if (err > 0)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * check_protection_over - check if it is time to stop protecting some PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation, when the absolute erase
 * counter is incremented, to check if some physical eraseblocks do not have
 * to be protected any longer. These physical eraseblocks are moved from the
 * protection trees to the used tree.
 */
static void check_protection_over(struct ubi_device *ubi)
{
	struct ubi_wl_prot_entry *pe;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
	while (1) {
		spin_lock(&ubi->wl_lock);
		if (!ubi->prot.aec.rb_node) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		pe = rb_entry(rb_first(&ubi->prot.aec),
			      struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec > ubi->abs_ec) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
		       pe->e->pnum, ubi->abs_ec, pe->abs_ec);
		rb_erase(&pe->rb_aec, &ubi->prot.aec);
		rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
		wl_tree_add(pe->e, &ubi->used);
		spin_unlock(&ubi->wl_lock);

		kfree(pe);
		cond_resched();
	}
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function enqueues a work defined by @wrk to the tail of the pending
 * works list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled)
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

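/*
 * Note, works are added at the tail of @ubi->works and taken from its head,
 * so they are executed in FIFO order - either by the background thread
 * ('ubi_thread()') or synchronously via 'do_work()'.
 */
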
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
	return 0;
}

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn-out physical eraseblock to a less worn-out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int cancel)
{
	int err, put = 0, scrubbing = 0, protect = 0;
	struct ubi_wl_prot_entry *uninitialized_var(pe);
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	kfree(wrk);

	if (cancel)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel the movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

	if (!ubi->scrub.rb_node) {
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);
			goto out_cancel;
		}
		paranoid_check_in_wl_tree(e1, &ubi->used);
		rb_erase(&e1->rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		paranoid_check_in_wl_tree(e1, &ubi->scrub);
		rb_erase(&e1->rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	paranoid_check_in_wl_tree(e2, &ubi->free);
	rb_erase(&e2->rb, &ubi->free);
	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_PEB_FREE) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes VID headers shortly after the PEB
			 * was given out, so we have a situation when it did
			 * not have a chance to write it down because it was
			 * preempted. Just re-schedule the work, so that next
			 * time it will likely have the VID header in place.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			goto out_not_moved;
		}

		ubi_err("error %d while reading VID header from PEB %d",
			err, e1->pnum);
		if (err > 0)
			err = -EIO;
		goto out_error;
	}

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {

		if (err < 0)
			goto out_error;
		if (err == 1)
			goto out_not_moved;

		/*
		 * For some reason the LEB was not moved - it might be because
		 * the volume is being deleted. We should prevent this PEB
		 * from being selected for wear-leveling movement for some
		 * "time", so put it to the protection tree.
		 */

		dbg_wl("cancelled moving PEB %d", e1->pnum);
		pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
		if (!pe) {
			err = -ENOMEM;
			goto out_error;
		}

		protect = 1;
	}

	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_tree_add(ubi, e1, pe, protect);
	if (!ubi->move_to_put)
		wl_tree_add(e2, &ubi->used);
	else
		put = 1;
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	if (put) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
		err = schedule_erase(ubi, e2, 0);
		if (err)
			goto out_error;
	}

	if (!protect) {
		err = schedule_erase(ubi, e1, 0);
		if (err)
			goto out_error;
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	return 0;

	/*
	 * For some reason the LEB was not moved - might be an error, might be
	 * something else. @e1 was not changed, so return it back. @e2 might
	 * have been changed, schedule it for erasure.
	 */
out_not_moved:
	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else
		wl_tree_add(e1, &ubi->used);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e2, 0);
	if (err)
		goto out_error;

	mutex_unlock(&ubi->move_mutex);
	return 0;

out_error:
	ubi_err("error %d while moving PEB %d to PEB %d",
		err, e1->pnum, e2->pnum);

	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	kmem_cache_free(ubi_wl_entry_slab, e1);
	kmem_cache_free(ubi_wl_entry_slab, e2);
	ubi_ro_mode(ubi);

	mutex_unlock(&ubi->move_mutex);
	return err;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between
		 * the lowest erase counter of used physical eraseblocks and
		 * a high erase counter of free physical eraseblocks is
		 * greater than %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum, err, need;

	if (cancel) {
		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kfree(wl_wrk);
		kmem_cache_free(ubi_wl_entry_slab, e);
		return 0;
	}

	dbg_wl("erase PEB %d EC %d", pnum, e->ec);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		/* Fine, we've erased it successfully */
		kfree(wl_wrk);

		spin_lock(&ubi->wl_lock);
		ubi->abs_ec += 1;
		wl_tree_add(e, &ubi->free);
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care of the
		 * protected physical eraseblocks.
		 */
		check_protection_over(ubi);

		/* And take care of wear-leveling */
		err = ensure_wear_leveling(ubi);
		return err;
	}

	ubi_err("failed to erase PEB %d, error %d", pnum, err);
	kfree(wl_wrk);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the PEB for erasure */
		err1 = schedule_erase(ubi, e, 0);
		if (err1) {
			err = err1;
			goto out_ro;
		}
		return err;
	}

	kmem_cache_free(ubi_wl_entry_slab, e);
	if (err != -EIO)
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, let's switch to RO mode.
		 */
		goto out_ro;

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err("bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

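	/*
	 * Replenish the reserve of PEBs for bad eraseblock handling. @need is
	 * computed so that, once the PEB about to be marked bad is accounted
	 * for, the reserve is back at @ubi->beb_rsvd_level (hence the "+ 1");
	 * e.g., with a level of 20 and 19 PEBs currently reserved, two more
	 * are taken from the available pool, if possible.
	 */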
	spin_lock(&ubi->volumes_lock);
	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
	if (need > 0) {
		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
		ubi->avail_pebs -= need;
		ubi->rsvd_pebs += need;
		ubi->beb_rsvd_pebs += need;
		if (need > 0)
			ubi_msg("reserve more %d PEBs", need);
	}

	if (ubi->beb_rsvd_pebs == 0) {
		spin_unlock(&ubi->volumes_lock);
		ubi_err("no reserved physical eraseblocks");
		goto out_ro;
	}

	spin_unlock(&ubi->volumes_lock);
	ubi_msg("mark PEB %d as bad", pnum);

	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	ubi->beb_rsvd_pebs -= 1;
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (ubi->beb_rsvd_pebs == 0)
		ubi_warn("last PEB from the reserved pool was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	ubi_ro_mode(ubi);
	return err;
}

/**
 * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred on this @pnum and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * to be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/*
		 * Wait for the WL worker by taking the @ubi->move_mutex; the
		 * lock/unlock pair is used purely as a barrier here.
		 */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
		goto retry;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. It may happen if the
		 * EBA unit already re-mapped the LEB in 'ubi_eba_copy_leb()'
		 * but the WL unit has not yet put the PEB to the "used" tree,
		 * although it is about to do this. So we just set a flag
		 * which will tell the WL worker that the PEB is not needed
		 * anymore and should be scheduled for erasure.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			paranoid_check_in_wl_tree(e, &ubi->used);
			rb_erase(&e->rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			paranoid_check_in_wl_tree(e, &ubi->scrub);
			rb_erase(&e->rb, &ubi->scrub);
		} else {
			err = prot_tree_del(ubi, e->pnum);
			if (err) {
				ubi_err("PEB %d not found", pnum);
				ubi_ro_mode(ubi);
				spin_unlock(&ubi->wl_lock);
				return err;
			}
		}
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, torture);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	return err;
}

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical
 * eraseblock needs scrubbing. This function schedules a physical eraseblock
 * for scrubbing, which is done in the background. This function returns zero
 * in case of success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	ubi_msg("schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		paranoid_check_in_wl_tree(e, &ubi->used);
		rb_erase(&e->rb, &ubi->used);
	} else {
		int err;

		err = prot_tree_del(ubi, e->pnum);
		if (err) {
			ubi_err("PEB %d not found", pnum);
			ubi_ro_mode(ubi);
			spin_unlock(&ubi->wl_lock);
			return err;
		}
	}

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi);
}

/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi)
{
	int err;

	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
	dbg_wl("flush (%d pending works)", ubi->works_count);
	while (ubi->works_count) {
		err = do_work(ubi);
		if (err)
			return err;
	}

	/*
	 * Make sure all the works which have been done in parallel are
	 * finished. Taking @work_sem in write mode waits until all workers,
	 * which hold it in read mode, have released it.
	 */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);

	/*
	 * And in case the last work was the WL worker and it cancelled the
	 * LEB movement, flush again.
	 */
	while (ubi->works_count) {
		dbg_wl("flush more (%d pending works)", ubi->works_count);
		err = do_work(ubi);
		if (err)
			return err;
	}

	return 0;
}

/**
 * tree_destroy - destroy an RB-tree.
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;

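	/*
	 * Destroy the tree without rebalancing: descend to a leaf, free it,
	 * and clear the parent's child pointer, so that the parent itself
	 * eventually becomes a leaf and is freed too.
	 */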
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg("background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err("%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg("%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				break;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}

/**
 * cancel_pending - cancel all pending works.
 * @ubi: UBI device description object
 */
static void cancel_pending(struct ubi_device *ubi)
{
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}

/**
 * ubi_wl_init_scan - initialize the WL unit using scanning information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int err;
	struct rb_node *rb1, *rb2;
	struct ubi_scan_volume *sv;
	struct ubi_scan_leb *seb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = si->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

	list_for_each_entry(seb, &si->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi_assert(e->ec >= 0);
		wl_tree_add(e, &ubi->free);
		ubi->lookuptbl[e->pnum] = e;
	}

	list_for_each_entry(seb, &si->corr, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = seb->pnum;
			e->ec = seb->ec;
			ubi->lookuptbl[e->pnum] = e;
			if (!seb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}
		}
	}

	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, WL_RESERVED_PEBS);
		goto out_free;
	}
	ubi->avail_pebs -= WL_RESERVED_PEBS;
	ubi->rsvd_pebs += WL_RESERVED_PEBS;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi);
	if (err)
		goto out_free;

	return 0;

out_free:
	cancel_pending(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}

/**
 * protection_trees_destroy - destroy the protection RB-trees.
 * @ubi: UBI device description object
 */
static void protection_trees_destroy(struct ubi_device *ubi)
{
	struct rb_node *rb;
	struct ubi_wl_prot_entry *pe;

	rb = ubi->prot.aec.rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &pe->rb_aec)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, pe->e);
			kfree(pe);
		}
	}
}

/**
 * ubi_wl_close - close the wear-leveling unit.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the UBI wear-leveling unit");

	cancel_pending(ubi);
	protection_trees_destroy(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
}

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID

/**
 * paranoid_check_ec - make sure that the erase counter of a physical
 * eraseblock is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum is equivalent to @ec, %1 if not, and a negative error code if an
 * error occurred.
 */
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec) {
		ubi_err("paranoid check failed for PEB %d", pnum);
		ubi_err("read EC is %lld, should be %d", read_ec, ec);
		ubi_dbg_dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present
 * in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %1 if it
 * is not.
 */
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root)
{
	if (in_wl_tree(e, root))
		return 0;

	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
		e->pnum, e->ec, root);
	ubi_dbg_dump_stack();
	return 1;
}

#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */