/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling unit.
 *
 * This unit is responsible for wear-leveling. It works in terms of physical
 * eraseblocks and erase counters and knows nothing about logical eraseblocks,
 * volumes, etc. From this unit's perspective all physical eraseblocks are of
 * two types - used and free. Used physical eraseblocks are those that were
 * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are
 * those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only 0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL unit by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL unit.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counters to free physical eraseblocks
 * with high erase counters.
 *
 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
 * an "optimal" physical eraseblock. For example, when it is known that the
 * physical eraseblock will be "put" soon because it contains short-term data,
 * the WL unit may pick a free physical eraseblock with a low erase counter,
 * and so forth.
 *
 * If the WL unit fails to erase a physical eraseblock, it marks it as bad.
 *
 * This unit is also responsible for scrubbing. If a bit-flip is detected in a
 * physical eraseblock, it has to be moved. Technically this is the same as
 * moving it for wear-leveling reasons.
 *
 * As it was said, for the UBI unit all physical eraseblocks are either "free"
 * or "used". Free eraseblocks are kept in the @wl->free RB-tree, while used
 * eraseblocks are kept in a set of different RB-trees: @wl->used,
 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
 *
 * Note, in this implementation, we keep a small in-RAM object for each
 * physical eraseblock. This is surely not a scalable solution. But it appears
 * to be good enough for moderately large flashes and it is simple. In future,
 * one may re-work this unit and make it more scalable.
 *
 * At the moment this unit does not utilize the sequence number, which was
 * introduced relatively recently. But it would be wise to do this because the
 * sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with a low erase counter, and we need to pick
 * the target PEB, we pick a PEB with the highest EC if our PEB is "old" and
 * we pick a target PEB with an average EC if our PEB is not very "old". This
 * is room for future re-works of the WL unit.
 *
 * FIXME: looks too complex, should be simplified (later).
 */
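
/*
 * For illustration only (nothing below is called in this file): a typical
 * user of this unit, e.g. the EBA unit, obtains and returns physical
 * eraseblocks roughly as in the hypothetical sketch below; error handling and
 * the actual I/O are omitted.
 *
 *	pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
 *	... write a VID header and data to PEB pnum via the I/O unit ...
 *	err = ubi_wl_put_peb(ubi, pnum, 0);
 */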

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * How many erase cycles short term, unknown, and long term physical
 * eraseblocks are protected for.
 */
#define ST_PROTECTION 16
#define U_PROTECTION  10
#define LT_PROTECTION 4

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL unit starts moving data from used physical eraseblocks
 * with low erase counters to free physical eraseblocks with high erase
 * counters.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL unit has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could
 * lead to unlimited wear of one or a few physical eraseblocks. Indeed,
 * imagine a situation when the picked physical eraseblock is constantly
 * erased after the data is written to it. So, we have a constant which limits
 * the highest erase counter of the free physical eraseblock to pick. Namely,
 * the WL unit does not pick eraseblocks with erase counters greater than the
 * lowest erase counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
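
/*
 * A purely illustrative example (made-up numbers): if %UBI_WL_THRESHOLD is
 * configured to 4096, %WL_FREE_MAX_DIFF is 8192; then, if the least worn free
 * PEB has an erase counter of 1000, no free PEB whose erase counter is 9192
 * or higher will be picked as the target of a wear-leveling move.
 */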

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

/**
 * struct ubi_wl_entry - wear-leveling entry.
 * @rb: link in the corresponding RB-tree
 * @ec: erase counter
 * @pnum: physical eraseblock number
 *
 * Each physical eraseblock has a corresponding &struct ubi_wl_entry object
 * which may be kept in different RB-trees.
 */
struct ubi_wl_entry {
	struct rb_node rb;
	int ec;
	int pnum;
};

/**
 * struct ubi_wl_prot_entry - PEB protection entry.
 * @rb_pnum: link in the @wl->prot.pnum RB-tree
 * @rb_aec: link in the @wl->prot.aec RB-tree
 * @abs_ec: the absolute erase counter value when the protection ends
 * @e: the wear-leveling entry of the physical eraseblock under protection
 *
 * When the WL unit returns a physical eraseblock, the physical eraseblock is
 * protected from being moved for some "time". For this reason, the physical
 * eraseblock is not directly moved from the @wl->free tree to the @wl->used
 * tree. There is one more tree in between where this physical eraseblock is
 * temporarily stored (@wl->prot).
 *
 * All this protection stuff is needed because:
 * o we don't want to move physical eraseblocks just after we have given them
 *   to the user; instead, we first want to let users fill them up with data;
 *
 * o there is a chance that the user will put the physical eraseblock very
 *   soon, so it makes sense not to move it for some time, but wait; this is
 *   especially important in case of "short term" physical eraseblocks.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with help of
 * the absolute erase counter (@wl->abs_ec). When it reaches a certain value,
 * the physical eraseblocks are moved from the protection trees (@wl->prot.*)
 * to the @wl->used tree.
 *
 * Protected physical eraseblocks are searched by physical eraseblock number
 * (when they are put) and by the absolute erase counter (to check if it is
 * time to move them to the @wl->used tree). So there are actually 2 RB-trees
 * storing the protected physical eraseblocks: @wl->prot.pnum and
 * @wl->prot.aec. They are referred to as the "protection" trees. The first
 * one is indexed by the physical eraseblock number. The second one is indexed
 * by the absolute erase counter. Both trees store
 * &struct ubi_wl_prot_entry objects.
 *
 * Each physical eraseblock has 2 main states: free and used. The former state
 * corresponds to the @wl->free tree. The latter state is split into several
 * sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is temporarily prohibited (@wl->prot.pnum and
 *   @wl->prot.aec trees);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those trees.
 */
struct ubi_wl_prot_entry {
	struct rb_node rb_pnum;
	struct rb_node rb_aec;
	unsigned long long abs_ec;
	struct ubi_wl_entry *e;
};
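
/*
 * An illustrative (made-up) protection timeline: suppose @wl->abs_ec is 100
 * when a "short term" PEB is handed out by 'ubi_wl_get_peb()'. Then
 * 'prot_tree_add()' stores it with @abs_ec = 100 + %ST_PROTECTION = 116, and
 * once 16 more erase operations have happened on the whole UBI device,
 * 'check_protection_over()' moves the PEB to the @wl->used tree.
 */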

/**
 * struct ubi_work - UBI work description data structure.
 * @list: a link in the list of pending works
 * @func: worker function
 * @priv: private data of the worker function
 *
 * @e: physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * The @func pointer points to the worker function. If the @cancel argument is
 * not zero, the worker has to free the resources and exit immediately. The
 * worker has to return zero in case of success and a negative error code in
 * case of failure.
 */
struct ubi_work {
	struct list_head list;
	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
	/* The below fields are only relevant to erasure works */
	struct ubi_wl_entry *e;
	int torture;
};

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root);
#else
#define paranoid_check_ec(ubi, pnum, ec) 0
#define paranoid_check_in_wl_tree(e, root)
#endif

/* Slab cache for wear-leveling entries */
static struct kmem_cache *wl_entries_slab;

/**
 * tree_empty - a helper function to check if an RB-tree is empty.
 * @root: the root of the tree
 *
 * This function returns non-zero if the RB-tree is empty and zero if not.
 */
static inline int tree_empty(struct rb_root *root)
{
	return root->rb_node == NULL;
}

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->rb, parent, p);
	rb_insert_color(&e->rb, root);
}

/*
 * Helper functions to add and delete wear-leveling entries from different
 * trees.
 */

static void free_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	wl_tree_add(e, &ubi->free);
}
static inline void used_tree_add(struct ubi_device *ubi,
				 struct ubi_wl_entry *e)
{
	wl_tree_add(e, &ubi->used);
}
static inline void scrub_tree_add(struct ubi_device *ubi,
				  struct ubi_wl_entry *e)
{
	wl_tree_add(e, &ubi->scrub);
}
static inline void free_tree_del(struct ubi_device *ubi,
				 struct ubi_wl_entry *e)
{
	paranoid_check_in_wl_tree(e, &ubi->free);
	rb_erase(&e->rb, &ubi->free);
}
static inline void used_tree_del(struct ubi_device *ubi,
				 struct ubi_wl_entry *e)
{
	paranoid_check_in_wl_tree(e, &ubi->used);
	rb_erase(&e->rb, &ubi->used);
}
static inline void scrub_tree_del(struct ubi_device *ubi,
				  struct ubi_wl_entry *e)
{
	paranoid_check_in_wl_tree(e, &ubi->scrub);
	rb_erase(&e->rb, &ubi->scrub);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);

	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err("work failed with error code %d", err);

	spin_lock(&ubi->wl_lock);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);
	return err;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	spin_lock(&ubi->wl_lock);
	while (tree_empty(&ubi->free)) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);
		if (err)
			return err;

		spin_lock(&ubi->wl_lock);
	}
	spin_unlock(&ubi->wl_lock);

	return 0;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * prot_tree_add - add physical eraseblock to protection trees.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 * @pe: protection entry object to use
 * @abs_ec: absolute erase counter value when this physical eraseblock has
 * to be removed from the protection trees.
 *
 * @wl->lock has to be locked.
 */
static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  struct ubi_wl_prot_entry *pe, int abs_ec)
{
	struct rb_node **p, *parent = NULL;
	struct ubi_wl_prot_entry *pe1;

	pe->e = e;
	pe->abs_ec = ubi->abs_ec + abs_ec;

	p = &ubi->prot.pnum.rb_node;
	while (*p) {
		parent = *p;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);

		if (e->pnum < pe1->e->pnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&pe->rb_pnum, parent, p);
	rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);

	p = &ubi->prot.aec.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec < pe1->abs_ec)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&pe->rb_aec, parent, p);
	rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
}

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @root: the RB-tree to search
 * @max: highest possible erase counter difference
 *
 * This function looks for a wear-leveling entry with an erase counter closest
 * to, but less than, the lowest erase counter in @root plus @max.
 */
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
	max += e->ec;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}
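
/*
 * Illustration of 'find_wl_entry()' (made-up numbers): if the tree holds
 * entries with erase counters {10, 50, 400} and @max is 100, the smallest
 * erase counter is 10, so the cut-off is 110 and the entry with erase
 * counter 50 is returned.
 */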

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 * @dtype: type of data which will be stored in this physical eraseblock
 *
 * This function returns a physical eraseblock number in case of success and a
 * negative error code in case of failure. Might sleep.
 */
int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
{
	int err, protect, medium_ec;
	struct ubi_wl_entry *e, *first, *last;
	struct ubi_wl_prot_entry *pe;

	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
		   dtype == UBI_UNKNOWN);

	pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_KERNEL);
	if (!pe)
		return -ENOMEM;

retry:
	spin_lock(&ubi->wl_lock);
	if (tree_empty(&ubi->free)) {
		if (ubi->works_count == 0) {
			ubi_assert(list_empty(&ubi->works));
			ubi_err("no free eraseblocks");
			spin_unlock(&ubi->wl_lock);
			kfree(pe);
			return -ENOSPC;
		}
		spin_unlock(&ubi->wl_lock);

		err = produce_free_peb(ubi);
		if (err < 0) {
			kfree(pe);
			return err;
		}
		goto retry;
	}

	switch (dtype) {
	case UBI_LONGTERM:
		/*
		 * For long term data we pick a physical eraseblock
		 * with a high erase counter. But the highest erase
		 * counter we can pick is bounded by the lowest erase
		 * counter plus %WL_FREE_MAX_DIFF.
		 */
		e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		protect = LT_PROTECTION;
		break;
	case UBI_UNKNOWN:
		/*
		 * For unknown data we pick a physical eraseblock with
		 * a medium erase counter. But by no means can we pick a
		 * physical eraseblock with an erase counter greater than
		 * or equal to the lowest erase counter plus
		 * %WL_FREE_MAX_DIFF.
		 */
		first = rb_entry(rb_first(&ubi->free),
				 struct ubi_wl_entry, rb);
		last = rb_entry(rb_last(&ubi->free),
				struct ubi_wl_entry, rb);

		if (last->ec - first->ec < WL_FREE_MAX_DIFF)
			e = rb_entry(ubi->free.rb_node,
				     struct ubi_wl_entry, rb);
		else {
			medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
			e = find_wl_entry(&ubi->free, medium_ec);
		}
		protect = U_PROTECTION;
		break;
	case UBI_SHORTTERM:
		/*
		 * For short term data we pick a physical eraseblock
		 * with the lowest erase counter as we expect it will
		 * be erased soon.
		 */
		e = rb_entry(rb_first(&ubi->free),
			     struct ubi_wl_entry, rb);
		protect = ST_PROTECTION;
		break;
	default:
		protect = 0;
		e = NULL;
		BUG();
	}

	/*
	 * Move the physical eraseblock to the protection trees where it will
	 * be protected from being moved for some time.
	 */
	free_tree_del(ubi, e);
	prot_tree_add(ubi, e, pe, protect);

	dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
	spin_unlock(&ubi->wl_lock);

	return e->pnum;
}
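
/*
 * To illustrate the @dtype hint with made-up numbers: assume
 * %UBI_WL_THRESHOLD is 4096 (so %WL_FREE_MAX_DIFF is 8192) and the free tree
 * holds PEBs with erase counters {1000, 3000, 5000, 8000, 9500}. Then
 * %UBI_SHORTTERM picks the EC 1000 PEB, %UBI_LONGTERM picks the EC 8000 PEB
 * (the most worn one below 1000 + 8192 = 9192), and %UBI_UNKNOWN picks the
 * EC 5000 PEB (the most worn one below the medium cut-off of
 * 1000 + (1000 + 8192)/2 = 5596).
 */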

/**
 * prot_tree_del - remove a physical eraseblock from the protection trees.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 */
static void prot_tree_del(struct ubi_device *ubi, int pnum)
{
	struct rb_node *p;
	struct ubi_wl_prot_entry *pe = NULL;

	p = ubi->prot.pnum.rb_node;
	while (p) {

		pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);

		if (pnum == pe->e->pnum)
			break;

		if (pnum < pe->e->pnum)
			p = p->rb_left;
		else
			p = p->rb_right;
	}

	ubi_assert(pe->e->pnum == pnum);
	rb_erase(&pe->rb_aec, &ubi->prot.aec);
	rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
	kfree(pe);
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = paranoid_check_ec(ubi, e->pnum, e->ec);
	if (err > 0)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * check_protection_over - check if it is time to stop protecting some
 * physical eraseblocks.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation, when the absolute erase
 * counter is incremented, to check if some physical eraseblocks no longer
 * have to be protected. These physical eraseblocks are moved from the
 * protection trees to the used tree.
 */
static void check_protection_over(struct ubi_device *ubi)
{
	struct ubi_wl_prot_entry *pe;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
	while (1) {
		spin_lock(&ubi->wl_lock);
		if (tree_empty(&ubi->prot.aec)) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		pe = rb_entry(rb_first(&ubi->prot.aec),
			      struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec > ubi->abs_ec) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
		       pe->e->pnum, ubi->abs_ec, pe->abs_ec);
		rb_erase(&pe->rb_aec, &ubi->prot.aec);
		rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
		used_tree_add(ubi, pe->e);
		spin_unlock(&ubi->wl_lock);

		kfree(pe);
		cond_resched();
	}
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function enqueues a work defined by @wrk to the tail of the pending
 * works list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled)
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
	return 0;
}

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn-out physical eraseblock to a less worn-out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int cancel)
{
	int err, put = 0;
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	kfree(wrk);

	if (cancel)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi);
	if (!vid_hdr)
		return -ENOMEM;

	spin_lock(&ubi->wl_lock);

	/*
	 * Only one WL worker at a time is supported in this implementation,
	 * so make sure a PEB is not being moved already.
	 */
	if (ubi->move_to || tree_empty(&ubi->free) ||
	    (tree_empty(&ubi->used) && tree_empty(&ubi->scrub))) {
		/*
		 * Only one WL worker at a time is supported in this
		 * implementation, so if a LEB is already being moved, cancel.
		 *
		 * No free physical eraseblocks? Well, we cancel wear-leveling
		 * then. It will be triggered again when a free physical
		 * eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       tree_empty(&ubi->free), tree_empty(&ubi->used));
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return 0;
	}

	if (tree_empty(&ubi->scrub)) {
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);
			ubi->wl_scheduled = 0;
			spin_unlock(&ubi->wl_lock);
			ubi_free_vid_hdr(ubi, vid_hdr);
			return 0;
		}
		used_tree_del(ubi, e1);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		scrub_tree_del(ubi, e1);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	free_tree_del(ubi, e2);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_PEB_FREE) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes VID headers shortly after the PEB
			 * was given, so we have a situation when it did not
			 * have a chance to write it down because it was
			 * preempted. Just re-schedule the work, so that next
			 * time it will likely have the VID header in place.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			err = 0;
		} else {
			ubi_err("error %d while reading VID header from PEB %d",
				err, e1->pnum);
			if (err > 0)
				err = -EIO;
		}
		goto error;
	}

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {
		if (err == UBI_IO_BITFLIPS)
			err = 0;
		goto error;
	}

	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put)
		used_tree_add(ubi, e2);
	else
		put = 1;
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_from_put = ubi->move_to_put = 0;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	if (put) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
		err = schedule_erase(ubi, e2, 0);
		if (err) {
			kmem_cache_free(wl_entries_slab, e2);
			ubi_ro_mode(ubi);
		}
	}

	err = schedule_erase(ubi, e1, 0);
	if (err) {
		kmem_cache_free(wl_entries_slab, e1);
		ubi_ro_mode(ubi);
	}

	dbg_wl("done");
	return err;

	/*
	 * Some error occurred. @e1 was not changed, so return it back. @e2
	 * might have been changed, schedule it for erasure.
	 */
error:
	if (err)
		dbg_wl("error %d occurred, cancel operation", err);
	ubi_assert(err <= 0);

	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
	if (ubi->move_from_put)
		put = 1;
	else
		used_tree_add(ubi, e1);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_from_put = ubi->move_to_put = 0;
	spin_unlock(&ubi->wl_lock);

	if (put) {
		/*
		 * Well, the source PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
		err = schedule_erase(ubi, e1, 0);
		if (err) {
			kmem_cache_free(wl_entries_slab, e1);
			ubi_ro_mode(ubi);
		}
	}

	err = schedule_erase(ubi, e2, 0);
	if (err) {
		kmem_cache_free(wl_entries_slab, e2);
		ubi_ro_mode(ubi);
	}

	yield();
	return err;
}
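
/*
 * An illustrative (hypothetical) race handled above: while the copy is in
 * flight, 'ubi_wl_put_peb()' may be called for @e2->pnum; it then only sets
 * @ubi->move_to_put, and when the copy finishes, 'wear_leveling_worker()'
 * schedules @e2 for erasure instead of adding it to the @wl->used tree.
 */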

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (tree_empty(&ubi->scrub)) {
		if (tree_empty(&ubi->used) || tree_empty(&ubi->free))
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum, err, need;

	if (cancel) {
		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kfree(wl_wrk);
		kmem_cache_free(wl_entries_slab, e);
		return 0;
	}

	dbg_wl("erase PEB %d EC %d", pnum, e->ec);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		/* Fine, we've erased it successfully */
		kfree(wl_wrk);

		spin_lock(&ubi->wl_lock);
		ubi->abs_ec += 1;
		free_tree_add(ubi, e);
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care of
		 * protected physical eraseblocks.
		 */
		check_protection_over(ubi);

		/* And take care of wear-leveling */
		err = ensure_wear_leveling(ubi);
		return err;
	}

	kfree(wl_wrk);
	kmem_cache_free(wl_entries_slab, e);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the PEB for erasure */
		err1 = schedule_erase(ubi, e, 0);
		if (err1) {
			err = err1;
			goto out_ro;
		}
		return err;
	} else if (err != -EIO) {
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, let's switch to RO mode.
		 */
		goto out_ro;
	}

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err("bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

	spin_lock(&ubi->volumes_lock);
	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
	if (need > 0) {
		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
		ubi->avail_pebs -= need;
		ubi->rsvd_pebs += need;
		ubi->beb_rsvd_pebs += need;
		if (need > 0)
			ubi_msg("reserve more %d PEBs", need);
	}

	if (ubi->beb_rsvd_pebs == 0) {
		spin_unlock(&ubi->volumes_lock);
		ubi_err("no reserved physical eraseblocks");
		goto out_ro;
	}

	spin_unlock(&ubi->volumes_lock);
	ubi_msg("mark PEB %d as bad", pnum);

	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	ubi->beb_rsvd_pebs -= 1;
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (ubi->beb_rsvd_pebs == 0)
		ubi_warn("last PEB from the reserved pool was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	ubi_ro_mode(ubi);
	return err;
}
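
/*
 * Illustration of the reserve top-up in 'erase_worker()' (made-up numbers):
 * if @ubi->beb_rsvd_level is 40 and @ubi->beb_rsvd_pebs has dropped to 39,
 * then @need is 2, so up to two PEBs are taken from @ubi->avail_pebs to
 * refill the bad-eraseblock handling reserve before PEB @pnum is marked bad
 * and consumes one reserved PEB.
 */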

/**
 * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred to this @pnum and it has to be tested. This function returns zero
 * in case of success and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);

	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * to be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved", pnum);
		ubi_assert(!ubi->move_from_put);
		ubi->move_from_put = 1;
		spin_unlock(&ubi->wl_lock);
		return 0;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. It may happen if the
		 * EBA unit has already re-mapped the LEB but the WL unit has
		 * not yet put the PEB to the "used" tree.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used))
			used_tree_del(ubi, e);
		else if (in_wl_tree(e, &ubi->scrub))
			scrub_tree_del(ubi, e);
		else
			prot_tree_del(ubi, e->pnum);
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, torture);
	if (err) {
		spin_lock(&ubi->wl_lock);
		used_tree_add(ubi, e);
		spin_unlock(&ubi->wl_lock);
	}

	return err;
}

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical
 * eraseblock needs scrubbing. This function schedules a physical eraseblock
 * for scrubbing, which is done in the background. This function returns zero
 * in case of success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	ubi_msg("schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used))
		used_tree_del(ubi, e);
	else
		prot_tree_del(ubi, pnum);

	scrub_tree_add(ubi, e);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi);
}

/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi)
{
	int err, pending_count;

	pending_count = ubi->works_count;

	dbg_wl("flush (%d pending works)", pending_count);

	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
	while (pending_count-- > 0) {
		err = do_work(ubi);
		if (err)
			return err;
	}

	return 0;
}

/**
 * tree_destroy - destroy an RB-tree.
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(wl_entries_slab, e);
		}
	}
}

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
static int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg("background thread \"%s\" started, PID %d",
		ubi->bgt_name, current->pid);

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			goto out;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err("%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg("%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				break;
			}
		} else
			failures = 0;

		cond_resched();
	}

out:
	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}

/**
 * cancel_pending - cancel all pending works.
 * @ubi: UBI device description object
 */
static void cancel_pending(struct ubi_device *ubi)
{
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}

/**
 * ubi_wl_init_scan - initialize the wear-leveling unit using scanning
 * information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int err;
	struct rb_node *rb1, *rb2;
	struct ubi_scan_volume *sv;
	struct ubi_scan_leb *seb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	ubi->max_ec = si->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
	if (IS_ERR(ubi->bgt_thread)) {
		err = PTR_ERR(ubi->bgt_thread);
		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, err);
		return err;
	}

	if (ubi_devices_cnt == 0) {
		wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
						    sizeof(struct ubi_wl_entry),
						    0, 0, NULL);
		if (!wl_entries_slab)
			return -ENOMEM;
	}

	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		goto out_free;

	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(wl_entries_slab, e);
			goto out_free;
		}
	}

	list_for_each_entry(seb, &si->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi_assert(e->ec >= 0);
		free_tree_add(ubi, e);
		ubi->lookuptbl[e->pnum] = e;
	}

	list_for_each_entry(seb, &si->corr, u.list) {
		cond_resched();

		e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(wl_entries_slab, e);
			goto out_free;
		}
	}

	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = seb->pnum;
			e->ec = seb->ec;
			ubi->lookuptbl[e->pnum] = e;
			if (!seb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				used_tree_add(ubi, e);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				scrub_tree_add(ubi, e);
			}
		}
	}

	if (WL_RESERVED_PEBS > ubi->avail_pebs) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, WL_RESERVED_PEBS);
		goto out_free;
	}
	ubi->avail_pebs -= WL_RESERVED_PEBS;
	ubi->rsvd_pebs += WL_RESERVED_PEBS;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi);
	if (err)
		goto out_free;

	return 0;

out_free:
	cancel_pending(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
	if (ubi_devices_cnt == 0)
		kmem_cache_destroy(wl_entries_slab);
	return err;
}

/**
 * protection_trees_destroy - destroy the protection RB-trees.
 * @ubi: UBI device description object
 */
static void protection_trees_destroy(struct ubi_device *ubi)
{
	struct rb_node *rb;
	struct ubi_wl_prot_entry *pe;

	rb = ubi->prot.aec.rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &pe->rb_aec)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(wl_entries_slab, pe->e);
			kfree(pe);
		}
	}
}

/**
 * ubi_wl_close - close the wear-leveling unit.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("disable \"%s\"", ubi->bgt_name);
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);

	dbg_wl("close the UBI wear-leveling unit");

	cancel_pending(ubi);
	protection_trees_destroy(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
	if (ubi_devices_cnt == 1)
		kmem_cache_destroy(wl_entries_slab);
}

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID

/**
 * paranoid_check_ec - make sure that the erase counter of a physical
 * eraseblock is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum is equivalent to @ec, %1 if not, and a negative error code if an
 * error occurred.
 */
static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec) {
		ubi_err("paranoid check failed for PEB %d", pnum);
		ubi_err("read EC is %lld, should be %d", read_ec, ec);
		ubi_dbg_dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present
 * in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %1 if it
 * is not.
 */
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root)
{
	if (in_wl_tree(e, root))
		return 0;

	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
		e->pnum, e->ec, root);
	ubi_dbg_dump_stack();
	return 1;
}

#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */