/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As was said above, for the UBI sub-system all physical eraseblocks are
 * either "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree,
 * while used eraseblocks are kept in the @wl->used, @wl->erroneous, or
 * @wl->scrub RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 * o we don't want to move physical eraseblocks just after we have given them
 *   to the user; instead, we first want to let users fill them up with data;
 *
 * o there is a chance that the user will put the physical eraseblock very
 *   soon, so it makes sense not to move it for some time, but wait.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case.
 * This is implemented with the help of the protection queue. Eraseblocks are
 * put to the tail of this queue when they are returned by 'ubi_wl_get_peb()',
 * and eraseblocks are removed from the head of the queue on each erase
 * operation (for any eraseblock). So the length of the queue defines how many
 * (global) erase cycles PEBs are protected.
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split up into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In the future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with low erase counter, and we need to pick the
 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This is
 * room for future re-works of the WL sub-system.
 */

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the
 * target physical eraseblock to move to. The simplest way would be just to
 * pick the one with the highest erase counter. But in certain workloads this
 * could lead to an unlimited wear of one or a few physical eraseblocks.
 * Indeed, imagine a situation when the picked physical eraseblock is
 * constantly erased after the data is written to it. So, we have a constant
 * which limits the highest erase counter of the free physical eraseblock to
 * pick. Namely, the WL sub-system does not pick eraseblocks with erase
 * counter greater than the lowest erase counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32
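
/*
 * Illustrative numbers only (the actual value comes from Kconfig): assuming
 * the default CONFIG_MTD_UBI_WL_THRESHOLD of 4096, wear-leveling is triggered
 * once the least worn used PEB lags a highly worn free PEB by 4096 or more
 * erase cycles, and WL_FREE_MAX_DIFF limits the free PEB picked as a target
 * to at most 2 * 4096 = 8192 erases above the smallest free erase counter.
 */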
/**
 * struct ubi_work - UBI work description data structure.
 * @list: a link in the list of pending works
 * @func: worker function
 * @e: physical eraseblock to erase
 * @vol_id: the volume ID on which this erasure is being performed
 * @lnum: the logical eraseblock number
 * @torture: if the physical eraseblock has to be tortured
 *
 * The @func pointer points to the worker function. If the @cancel argument is
 * not zero, the worker has to free the resources and exit immediately. The
 * worker has to return zero in case of success and a negative error code in
 * case of failure.
 */
struct ubi_work {
	struct list_head list;
	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
	/* The below fields are only relevant to erasure works */
	struct ubi_wl_entry *e;
	int vol_id;
	int lnum;
	int torture;
};

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e);

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing works at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes the semaphore in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err("work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}
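
/*
 * Note (summary of how works get run, not part of the original comments):
 * do_work() is normally driven by the background thread in ubi_thread(), but
 * it is also called synchronously from produce_free_peb() below when a free
 * PEB is needed immediately, e.g. when the background thread is disabled.
 */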
/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	spin_lock(&ubi->wl_lock);
	while (!ubi->free.rb_node) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);
		if (err)
			return err;

		spin_lock(&ubi->wl_lock);
	}
	spin_unlock(&ubi->wl_lock);

	return 0;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock
 * has to be locked.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @root: the RB-tree to look in
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.
 */
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;
	int max;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max = e->ec + diff;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}
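
/*
 * Illustration with hypothetical numbers (not from the original sources): if
 * the tree holds entries with erase counters {10, 50, 200, 5000} and @diff is
 * 1000, find_wl_entry() returns the entry with EC 200 - the largest erase
 * counter that is still below min + @diff = 1010.
 */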
/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure. Might sleep.
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int err;
	struct ubi_wl_entry *e, *first, *last;

retry:
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_assert(list_empty(&ubi->works));
			ubi_err("no free eraseblocks");
			spin_unlock(&ubi->wl_lock);
			return -ENOSPC;
		}
		spin_unlock(&ubi->wl_lock);

		err = produce_free_peb(ubi);
		if (err < 0)
			return err;
		goto retry;
	}

	first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
	last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);

	if (last->ec - first->ec < WL_FREE_MAX_DIFF)
		e = rb_entry(ubi->free.rb_node, struct ubi_wl_entry, u.rb);
	else
		e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2);

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Move the physical eraseblock to the protection queue where it will
	 * be protected from being moved for some time.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	dbg_wl("PEB %d EC %d", e->pnum, e->ec);
	prot_queue_add(ubi, e);
	spin_unlock(&ubi->wl_lock);

	err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
				    ubi->peb_size - ubi->vid_hdr_aloffset);
	if (err) {
		ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
		return err;
	}

	return e->pnum;
}

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	e = ubi->lookuptbl[pnum];
	if (!e)
		return -ENODEV;

	if (self_check_in_pq(ubi, e))
		return -ENODEV;

	list_del(&e->u.list);
	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = self_check_ec(ubi, e->pnum, e->ec);
	if (err)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from the
 * tail of the protection queue. These PEBs have been protected for long enough
 * and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e, *tmp;
	int count;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
repeat:
	count = 0;
	spin_lock(&ubi->wl_lock);
	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
		dbg_wl("PEB %d EC %d protection over, move to used tree",
		       e->pnum, e->ec);

		list_del(&e->u.list);
		wl_tree_add(e, &ubi->used);
		if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
			 */
			spin_unlock(&ubi->wl_lock);
			cond_resched();
			goto repeat;
		}
	}

	ubi->pq_head += 1;
	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
		ubi->pq_head = 0;
	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
	spin_unlock(&ubi->wl_lock);
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int vol_id, int lnum, int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
	return 0;
}
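
/*
 * Summary of the move performed by wear_leveling_worker() below (added for
 * orientation, not part of the original comments): the source @e1 is the
 * least worn entry of @ubi->used (or the first entry of @ubi->scrub when
 * scrubbing is pending), the target @e2 is a highly worn free PEB picked by
 * find_wl_entry(), and for plain wear-leveling the copy is only performed
 * when e2->ec - e1->ec >= UBI_WL_THRESHOLD.
 */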
/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int cancel)
{
	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
	int vol_id = -1, uninitialized_var(lnum);
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	kfree(wrk);
	if (cancel)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

	if (!ubi->scrub.rb_node) {
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);
			goto out_cancel;
		}
		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		self_check_in_wl_tree(ubi, e1, &ubi->scrub);
		rb_erase(&e1->u.rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	self_check_in_wl_tree(ubi, e2, &ubi->free);
	rb_erase(&e2->u.rb, &ubi->free);
	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_FF) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes VID headers shortly after the PEB
			 * was given, so we have a situation when it has not
			 * yet had a chance to write it, because it was
			 * preempted.
			 * So add this PEB to the protection queue so far,
			 * because presumably more data will be written there
			 * (including the missing VID header), and then we'll
			 * move it.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			protect = 1;
			goto out_not_moved;
		} else if (err == UBI_IO_FF_BITFLIPS) {
			/*
			 * The same situation as %UBI_IO_FF, but bit-flips were
			 * detected. It is better to schedule this PEB for
			 * scrubbing.
			 */
			dbg_wl("PEB %d has no VID header but has bit-flips",
			       e1->pnum);
			scrubbing = 1;
			goto out_not_moved;
		}

		ubi_err("error %d while reading VID header from PEB %d",
			err, e1->pnum);
		goto out_error;
	}

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {
		if (err == MOVE_CANCEL_RACE) {
			/*
			 * The LEB has not been moved because the volume is
			 * being deleted or the PEB has been put meanwhile. We
			 * should prevent this PEB from being selected for
			 * wear-leveling movement again, so put it to the
			 * protection queue.
			 */
			protect = 1;
			goto out_not_moved;
		}
		if (err == MOVE_RETRY) {
			scrubbing = 1;
			goto out_not_moved;
		}
		if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
		    err == MOVE_TARGET_RD_ERR) {
			/*
			 * Target PEB had bit-flips or write error - torture
			 * it.
			 */
			torture = 1;
			goto out_not_moved;
		}

		if (err == MOVE_SOURCE_RD_ERR) {
			/*
			 * An error happened while reading the source PEB. Do
			 * not switch to R/O mode in this case, and give the
			 * upper layers a possibility to recover from this,
			 * e.g. by unmapping corresponding LEB. Instead, just
			 * put this PEB to the @ubi->erroneous list to prevent
			 * UBI from trying to move it over and over again.
			 */
			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
				ubi_err("too many erroneous eraseblocks (%d)",
					ubi->erroneous_peb_count);
				goto out_error;
			}
			erroneous = 1;
			goto out_not_moved;
		}

		if (err < 0)
			goto out_error;

		ubi_assert(0);
	}

	/* The PEB has been successfully moved */
	if (scrubbing)
		ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
			e1->pnum, vol_id, lnum, e2->pnum);
	ubi_free_vid_hdr(ubi, vid_hdr);

	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put) {
		wl_tree_add(e2, &ubi->used);
		e2 = NULL;
	}
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e1, vol_id, lnum, 0);
	if (err) {
		kmem_cache_free(ubi_wl_entry_slab, e1);
		if (e2)
			kmem_cache_free(ubi_wl_entry_slab, e2);
		goto out_ro;
	}

	if (e2) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
		       e2->pnum, vol_id, lnum);
		err = schedule_erase(ubi, e2, vol_id, lnum, 0);
		if (err) {
			kmem_cache_free(ubi_wl_entry_slab, e2);
			goto out_ro;
		}
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	return 0;

	/*
	 * For some reason the LEB was not moved, might be an error, might be
	 * something else. @e1 was not changed, so return it back. @e2 might
	 * have been changed, schedule it for erasure.
	 */
out_not_moved:
	if (vol_id != -1)
		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
		       e1->pnum, vol_id, lnum, e2->pnum, err);
	else
		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
		       e1->pnum, e2->pnum, err);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_queue_add(ubi, e1);
	else if (erroneous) {
		wl_tree_add(e1, &ubi->erroneous);
		ubi->erroneous_peb_count += 1;
	} else if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else
		wl_tree_add(e1, &ubi->used);
	ubi_assert(!ubi->move_to_put);
	ubi->move_from = ubi->move_to = NULL;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	err = schedule_erase(ubi, e2, vol_id, lnum, torture);
	if (err) {
		kmem_cache_free(ubi_wl_entry_slab, e2);
		goto out_ro;
	}
	mutex_unlock(&ubi->move_mutex);
	return 0;

out_error:
	if (vol_id != -1)
		ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
			err, e1->pnum, vol_id, lnum, e2->pnum);
	else
		ubi_err("error %d while moving PEB %d to PEB %d",
			err, e1->pnum, e2->pnum);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	kmem_cache_free(ubi_wl_entry_slab, e1);
	kmem_cache_free(ubi_wl_entry_slab, e2);

out_ro:
	ubi_ro_mode(ubi);
	mutex_unlock(&ubi->move_mutex);
	ubi_assert(err != 0);
	return err < 0 ? err : -EIO;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the @ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum, err, need;
	int vol_id = wl_wrk->vol_id;
	int lnum = wl_wrk->lnum;

	if (cancel) {
		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kfree(wl_wrk);
		kmem_cache_free(ubi_wl_entry_slab, e);
		return 0;
	}

	dbg_wl("erase PEB %d EC %d LEB %d:%d",
	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		/* Fine, we've erased it successfully */
		kfree(wl_wrk);

		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->free);
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care of
		 * protected physical eraseblocks.
		 */
		serve_prot_queue(ubi);

		/* And take care of wear-leveling */
		err = ensure_wear_leveling(ubi);
		return err;
	}

	ubi_err("failed to erase PEB %d, error %d", pnum, err);
	kfree(wl_wrk);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the LEB for erasure */
		err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
		if (err1) {
			err = err1;
			goto out_ro;
		}
		return err;
	}

	kmem_cache_free(ubi_wl_entry_slab, e);
	if (err != -EIO)
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, let's switch to R/O mode.
		 */
		goto out_ro;

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err("bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

	spin_lock(&ubi->volumes_lock);
	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
	if (need > 0) {
		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
		ubi->avail_pebs -= need;
		ubi->rsvd_pebs += need;
		ubi->beb_rsvd_pebs += need;
		if (need > 0)
			ubi_msg("reserve more %d PEBs", need);
	}

	if (ubi->beb_rsvd_pebs == 0) {
		spin_unlock(&ubi->volumes_lock);
		ubi_err("no reserved physical eraseblocks");
		goto out_ro;
	}
	spin_unlock(&ubi->volumes_lock);

	ubi_msg("mark PEB %d as bad", pnum);
	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	ubi->beb_rsvd_pebs -= 1;
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (ubi->beb_rsvd_pebs)
		ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
	else
		ubi_warn("last PEB from the reserved pool was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	ubi_ro_mode(ubi);
	return err;
}

/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred to this @pnum and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
		   int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * to be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/* Wait for the WL worker by taking the @ubi->move_mutex */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
		goto retry;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. It may happen if the EBA
		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
		 * but the WL sub-system has not put the PEB to the "used" tree
		 * yet, but it is about to do this. So we just set a flag which
		 * will tell the WL worker that the PEB is not needed anymore
		 * and should be scheduled for erasure.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			self_check_in_wl_tree(ubi, e, &ubi->used);
			rb_erase(&e->u.rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			self_check_in_wl_tree(ubi, e, &ubi->scrub);
			rb_erase(&e->u.rb, &ubi->scrub);
		} else if (in_wl_tree(e, &ubi->erroneous)) {
			self_check_in_wl_tree(ubi, e, &ubi->erroneous);
			rb_erase(&e->u.rb, &ubi->erroneous);
			ubi->erroneous_peb_count -= 1;
			ubi_assert(ubi->erroneous_peb_count >= 0);
			/* Erroneous PEBs should be tortured */
			torture = 1;
		} else {
			err = prot_queue_del(ubi, e->pnum);
			if (err) {
				ubi_err("PEB %d not found", pnum);
				ubi_ro_mode(ubi);
				spin_unlock(&ubi->wl_lock);
				return err;
			}
		}
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, vol_id, lnum, torture);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	return err;
}

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing which is done in the background. This function returns zero in
 * case of success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	dbg_msg("schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
	    in_wl_tree(e, &ubi->erroneous)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted into the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		self_check_in_wl_tree(ubi, e, &ubi->used);
		rb_erase(&e->u.rb, &ubi->used);
	} else {
		int err;

		err = prot_queue_del(ubi, e->pnum);
		if (err) {
			ubi_err("PEB %d not found", pnum);
			ubi_ro_mode(ubi);
			spin_unlock(&ubi->wl_lock);
			return err;
		}
	}

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi);
}
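
/*
 * Usage note for ubi_wl_flush() below (summary, not from the original
 * comments): callers pass an explicit volume ID and LEB number to wait only
 * for the erasures belonging to that LEB, or %UBI_ALL for both arguments to
 * synchronously drain the whole work queue.
 */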
/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 * @vol_id: the volume id to flush for
 * @lnum: the logical eraseblock number to flush for
 *
 * This function executes all pending works for a particular volume id /
 * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
 * acts as a wildcard for all of the corresponding volume numbers or logical
 * eraseblock numbers. It returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
{
	int err = 0;
	int found = 1;

	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
	dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
	       vol_id, lnum, ubi->works_count);

	while (found) {
		struct ubi_work *wrk;
		found = 0;

		down_read(&ubi->work_sem);
		spin_lock(&ubi->wl_lock);
		list_for_each_entry(wrk, &ubi->works, list) {
			if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
			    (lnum == UBI_ALL || wrk->lnum == lnum)) {
				list_del(&wrk->list);
				ubi->works_count -= 1;
				ubi_assert(ubi->works_count >= 0);
				spin_unlock(&ubi->wl_lock);

				err = wrk->func(ubi, wrk, 0);
				if (err) {
					up_read(&ubi->work_sem);
					return err;
				}

				spin_lock(&ubi->wl_lock);
				found = 1;
				break;
			}
		}
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
	}

	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);

	return err;
}

/**
 * tree_destroy - destroy an RB-tree.
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, u.rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->u.rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg("background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err("%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg("%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				ubi->thread_enabled = 0;
				continue;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}
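
/*
 * Note (added for orientation, not part of the original comments):
 * cancel_pending() below is used on the shutdown and error paths, see
 * ubi_wl_init() and ubi_wl_close(). Each pending work is invoked with the
 * @cancel argument set to 1 so that it only releases its resources instead
 * of touching the flash.
 */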
/**
 * cancel_pending - cancel all pending works.
 * @ubi: UBI device description object
 */
static void cancel_pending(struct ubi_device *ubi)
{
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}

/**
 * ubi_wl_init - initialize the WL sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int err, i;
	struct rb_node *rb1, *rb2;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = ai->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
		INIT_LIST_HEAD(&ubi->pq[i]);
	ubi->pq_head = 0;

	list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = aeb->pnum;
		e->ec = aeb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

	list_for_each_entry(aeb, &ai->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = aeb->pnum;
		e->ec = aeb->ec;
		ubi_assert(e->ec >= 0);
		wl_tree_add(e, &ubi->free);
		ubi->lookuptbl[e->pnum] = e;
	}

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = aeb->pnum;
			e->ec = aeb->ec;
			ubi->lookuptbl[e->pnum] = e;
			if (!aeb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}
		}
	}

	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, WL_RESERVED_PEBS);
		if (ubi->corr_peb_count)
			ubi_err("%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		goto out_free;
	}
	ubi->avail_pebs -= WL_RESERVED_PEBS;
	ubi->rsvd_pebs += WL_RESERVED_PEBS;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi);
	if (err)
		goto out_free;

	return 0;

out_free:
	cancel_pending(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}

/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */
static void protection_queue_destroy(struct ubi_device *ubi)
{
	int i;
	struct ubi_wl_entry *e, *tmp;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
		list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
			list_del(&e->u.list);
			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}

/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the WL sub-system");
	cancel_pending(ubi);
	protection_queue_destroy(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->erroneous);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
}

/**
 * self_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum matches @ec, %1 if it does not, and a negative error code if an
 * error occurred.
 */
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	if (!ubi->dbg->chk_gen)
		return 0;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec) {
		ubi_err("self-check failed for PEB %d", pnum);
		ubi_err("read EC is %lld, should be %d", read_ec, ec);
		dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
 * is not.
 */
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root)
{
	if (!ubi->dbg->chk_gen)
		return 0;

	if (in_wl_tree(e, root))
		return 0;

	ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ",
		e->pnum, e->ec, root);
	dump_stack();
	return -EINVAL;
}

/**
 * self_check_in_pq - check if wear-leveling entry is in the protection
 *                    queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
 */
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e)
{
	struct ubi_wl_entry *p;
	int i;

	if (!ubi->dbg->chk_gen)
		return 0;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
		list_for_each_entry(p, &ubi->pq[i], u.list)
			if (p == e)
				return 0;

	ubi_err("self-check failed for PEB %d, EC %d, Protect queue",
		e->pnum, e->ec);
	dump_stack();
	return -EINVAL;
}