/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * SPDX-License-Identifier: GPL-2.0+
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * The UBI Eraseblock Association (EBA) sub-system.
 *
 * This sub-system is responsible for I/O to/from logical eraseblocks.
 *
 * Although in this implementation the EBA table is fully kept and managed in
 * RAM, which limits scalability, it might be (partially) maintained on flash
 * in future implementations.
 *
 * The EBA sub-system implements per-logical eraseblock locking. Before
 * accessing a logical eraseblock it is locked for reading or writing. The
 * per-logical eraseblock locking is implemented by means of the lock tree. The
 * lock tree is an RB-tree which refers to all the currently locked logical
 * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
 * They are indexed by (@vol_id, @lnum) pairs.
 *
 * EBA also maintains the global sequence counter which is incremented each
 * time a logical eraseblock is mapped to a physical eraseblock and it is
 * stored in the volume identifier header. This means that each VID header has
 * a unique sequence number. The sequence number only increases, and we assume
 * 64 bits is enough for it never to overflow.
 */

#ifndef __UBOOT__
#include <linux/slab.h>
#include <linux/crc32.h>
#else
#include <ubi_uboot.h>
#endif

#include <linux/err.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for atomic LEB change operation */
#define EBA_RESERVED_PEBS 1

/**
 * ubi_next_sqnum - get next sequence number.
 * @ubi: UBI device description object
 *
 * This function returns the next sequence number to use, which is just the
 * current global sequence counter value. It also increases the global
 * sequence counter.
 */
unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
{
	unsigned long long sqnum;

	spin_lock(&ubi->ltree_lock);
	sqnum = ubi->global_sqnum++;
	spin_unlock(&ubi->ltree_lock);

	return sqnum;
}
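
/*
 * Illustrative sketch (not built): how the write paths below stamp a VID
 * header with a fresh sequence number before mapping a LEB to a new PEB.
 * example_stamp_vid_hdr() is a hypothetical helper, not part of this driver.
 */
#if 0
static void example_stamp_vid_hdr(struct ubi_device *ubi,
				  struct ubi_vid_hdr *vid_hdr)
{
	/* Every new LEB-to-PEB mapping gets a unique, monotonic sqnum */
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
}
#endif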

/**
 * ubi_get_compat - get compatibility flags of a volume.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 *
 * This function returns compatibility flags for an internal volume. User
 * volumes have no compatibility flags, so %0 is returned.
 */
static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
{
	if (vol_id == UBI_LAYOUT_VOLUME_ID)
		return UBI_LAYOUT_VOLUME_COMPAT;
	return 0;
}

/**
 * ltree_lookup - look up the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
 * object if the logical eraseblock is locked and %NULL if it is not.
 * @ubi->ltree_lock has to be locked.
 */
static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
					    int lnum)
{
	struct rb_node *p;

	p = ubi->ltree.rb_node;
	while (p) {
		struct ubi_ltree_entry *le;

		le = rb_entry(p, struct ubi_ltree_entry, rb);

		if (vol_id < le->vol_id)
			p = p->rb_left;
		else if (vol_id > le->vol_id)
			p = p->rb_right;
		else {
			if (lnum < le->lnum)
				p = p->rb_left;
			else if (lnum > le->lnum)
				p = p->rb_right;
			else
				return le;
		}
	}

	return NULL;
}

/**
 * ltree_add_entry - add new entry to the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function adds a new entry for logical eraseblock (@vol_id, @lnum) to
 * the lock tree. If such an entry is already there, its usage counter is
 * increased. Returns a pointer to the lock tree entry or %-ENOMEM if memory
 * allocation failed.
 */
static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
					       int vol_id, int lnum)
{
	struct ubi_ltree_entry *le, *le1, *le_free;

	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
	if (!le)
		return ERR_PTR(-ENOMEM);

	le->users = 0;
	init_rwsem(&le->mutex);
	le->vol_id = vol_id;
	le->lnum = lnum;

	spin_lock(&ubi->ltree_lock);
	le1 = ltree_lookup(ubi, vol_id, lnum);

	if (le1) {
		/*
		 * This logical eraseblock is already locked. The newly
		 * allocated lock entry is not needed.
		 */
		le_free = le;
		le = le1;
	} else {
		struct rb_node **p, *parent = NULL;

		/*
		 * No lock entry, add the newly allocated one to the
		 * @ubi->ltree RB-tree.
		 */
		le_free = NULL;

		p = &ubi->ltree.rb_node;
		while (*p) {
			parent = *p;
			le1 = rb_entry(parent, struct ubi_ltree_entry, rb);

			if (vol_id < le1->vol_id)
				p = &(*p)->rb_left;
			else if (vol_id > le1->vol_id)
				p = &(*p)->rb_right;
			else {
				ubi_assert(lnum != le1->lnum);
				if (lnum < le1->lnum)
					p = &(*p)->rb_left;
				else
					p = &(*p)->rb_right;
			}
		}

		rb_link_node(&le->rb, parent, p);
		rb_insert_color(&le->rb, &ubi->ltree);
	}
	le->users += 1;
	spin_unlock(&ubi->ltree_lock);

	kfree(le_free);
	return le;
}

/**
 * leb_read_lock - lock logical eraseblock for reading.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for reading. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_read(&le->mutex);
	return 0;
}

/**
 * leb_read_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	up_read(&le->mutex);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);
}

/**
 * leb_write_lock - lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_write(&le->mutex);
	return 0;
}
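
/*
 * Illustrative sketch (not built): the per-LEB locking discipline the helpers
 * above implement. Readers may run concurrently, writers are exclusive, and
 * every lock must be paired with the matching unlock so the lock tree entry
 * can be freed. example_exclusive_access() is a hypothetical caller.
 */
#if 0
static int example_exclusive_access(struct ubi_device *ubi, int vol_id,
				    int lnum)
{
	int err;

	err = leb_write_lock(ubi, vol_id, lnum);	/* may sleep */
	if (err)
		return err;
	/* ... change the LEB-to-PEB mapping or the PEB contents here ... */
	leb_write_unlock(ubi, vol_id, lnum);
	return 0;
}
#endif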

/**
 * leb_write_trylock - try to lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing if there is no
 * contention and does nothing if there is contention. Returns %0 in case of
 * success, %1 in case of contention, and a negative error code in case of
 * failure.
 */
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	if (down_write_trylock(&le->mutex))
		return 0;

	/* Contention, cancel */
	spin_lock(&ubi->ltree_lock);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);

	return 1;
}

/**
 * leb_write_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */
static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	up_write(&le->mutex);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);
}

/**
 * ubi_eba_unmap_leb - un-map logical eraseblock.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 *
 * This function un-maps logical eraseblock @lnum and schedules the
 * corresponding physical eraseblock for erasure. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
		      int lnum)
{
	int err, pnum, vol_id = vol->vol_id;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum < 0)
		/* This logical eraseblock is already unmapped */
		goto out_unlock;

	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);

	down_read(&ubi->fm_sem);
	vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
	up_read(&ubi->fm_sem);
	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);

out_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}
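
/*
 * Illustrative sketch (not built): un-mapping a dynamic-volume LEB and
 * reading it back. After ubi_eba_unmap_leb() succeeds the LEB is unmapped,
 * so ubi_eba_read_leb() (below) fills the buffer with 0xFF bytes instead of
 * touching the flash. 'vol', 'buf' and 'len' are assumed to exist.
 */
#if 0
	err = ubi_eba_unmap_leb(ubi, vol, lnum);
	if (!err)
		err = ubi_eba_read_leb(ubi, vol, lnum, buf, 0, len, 0);
	/* on success, 'buf' now holds 'len' 0xFF bytes */
#endif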

/**
 * ubi_eba_read_leb - read data.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: buffer to store the read data
 * @offset: offset from where to read
 * @len: how many bytes to read
 * @check: data CRC check flag
 *
 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
 * bytes. The @check flag only makes sense for static volumes and forces
 * eraseblock data CRC checking.
 *
 * In case of success this function returns zero. In case of a static volume,
 * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
 * returned for any volume type if an ECC error was detected by the MTD device
 * driver. Other negative error codes may be returned in case of other errors.
 */
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		     void *buf, int offset, int len, int check)
{
	int err, pnum, scrub = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t uninitialized_var(crc);

	err = leb_read_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum < 0) {
		/*
		 * The logical eraseblock is not mapped, fill the whole buffer
		 * with 0xFF bytes. The exception is static volumes for which
		 * it is an error to read unmapped logical eraseblocks.
		 */
		dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
			len, offset, vol_id, lnum);
		leb_read_unlock(ubi, vol_id, lnum);
		ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
		memset(buf, 0xFF, len);
		return 0;
	}

	dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	if (vol->vol_type == UBI_DYNAMIC_VOLUME)
		check = 0;

retry:
	if (check) {
		vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
		if (!vid_hdr) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
		if (err && err != UBI_IO_BITFLIPS) {
			if (err > 0) {
				/*
				 * The header is either absent or corrupted.
				 * The former case means there is a bug -
				 * switch to read-only mode just in case.
				 * The latter case means a real corruption - we
				 * may try to recover data. FIXME: but this is
				 * not implemented.
				 */
				if (err == UBI_IO_BAD_HDR_EBADMSG ||
				    err == UBI_IO_BAD_HDR) {
					ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
						 pnum, vol_id, lnum);
					err = -EBADMSG;
				} else
					ubi_ro_mode(ubi);
			}
			goto out_free;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
		ubi_assert(len == be32_to_cpu(vid_hdr->data_size));

		crc = be32_to_cpu(vid_hdr->data_crc);
		ubi_free_vid_hdr(ubi, vid_hdr);
	}

	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
	if (err) {
		if (err == UBI_IO_BITFLIPS) {
			scrub = 1;
			err = 0;
		} else if (mtd_is_eccerr(err)) {
			if (vol->vol_type == UBI_DYNAMIC_VOLUME)
				goto out_unlock;
			scrub = 1;
			if (!check) {
				ubi_msg("force data checking");
				check = 1;
				goto retry;
			}
		} else
			goto out_unlock;
	}

	if (check) {
		uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);

		if (crc1 != crc) {
			ubi_warn("CRC error: calculated %#08x, must be %#08x",
				 crc1, crc);
			err = -EBADMSG;
			goto out_unlock;
		}
	}

	if (scrub)
		err = ubi_wl_scrub_peb(ubi, pnum);

	leb_read_unlock(ubi, vol_id, lnum);
	return err;

out_free:
	ubi_free_vid_hdr(ubi, vid_hdr);
out_unlock:
	leb_read_unlock(ubi, vol_id, lnum);
	return err;
}
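
/*
 * Illustrative sketch (not built): interpreting ubi_eba_read_leb() results.
 * %-EBADMSG means data was read but is corrupted (static-volume CRC mismatch
 * or an uncorrectable ECC error); other negative codes are I/O failures.
 */
#if 0
	err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, 1);
	if (err == -EBADMSG) {
		/* 'buf' was filled, but the data failed the CRC/ECC check */
	} else if (err) {
		/* real I/O error - 'buf' contents are not meaningful */
	}
#endif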

/**
 * recover_peb - recover from write failure.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to recover
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 * @buf: data which was not written because of the write failure
 * @offset: offset of the failed write
 * @len: how many bytes should have been written
 *
 * This function is called in case of a write failure and moves all good data
 * from the potentially bad physical eraseblock to a good physical eraseblock.
 * This function also writes the data which was not written due to the failure.
 * Returns zero in case of success, and a negative error code in case of
 * failure.
 */
static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
		       const void *buf, int offset, int len)
{
	int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
	struct ubi_volume *vol = ubi->volumes[idx];
	struct ubi_vid_hdr *vid_hdr;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

retry:
	new_pnum = ubi_wl_get_peb(ubi);
	if (new_pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		return new_pnum;
	}

	ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum);

	err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err > 0)
			err = -EIO;
		goto out_put;
	}

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
	if (err)
		goto write_error;

	data_size = offset + len;
	mutex_lock(&ubi->buf_mutex);
	memset(ubi->peb_buf + offset, 0xFF, len);

	/* Read everything before the area where the write failure happened */
	if (offset > 0) {
		err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
		if (err && err != UBI_IO_BITFLIPS)
			goto out_unlock;
	}

	memcpy(ubi->peb_buf + offset, buf, len);

	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
	if (err) {
		mutex_unlock(&ubi->buf_mutex);
		goto write_error;
	}

	mutex_unlock(&ubi->buf_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);

	down_read(&ubi->fm_sem);
	vol->eba_tbl[lnum] = new_pnum;
	up_read(&ubi->fm_sem);
	ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);

	ubi_msg("data was successfully recovered");
	return 0;

out_unlock:
	mutex_unlock(&ubi->buf_mutex);
out_put:
	ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return err;

write_error:
	/*
	 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
	 * get another one.
	 */
	ubi_warn("failed to write to PEB %d", new_pnum);
	ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
	if (++tries > UBI_IO_RETRIES) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}
	ubi_msg("try again");
	goto retry;
}
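
/*
 * Illustrative sketch (not built): the bounded-retry pattern used by
 * recover_peb() above and the write paths below. A fresh PEB is tried at
 * most UBI_IO_RETRIES times before the error is propagated.
 * example_operation() is a hypothetical operation that may fail with %-EIO.
 */
#if 0
	int tries = 0;

retry:
	err = example_operation();
	if (err == -EIO && ++tries <= UBI_IO_RETRIES)
		goto retry;
#endif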

/**
 * ubi_eba_write_leb - write data to dynamic volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: the data to write
 * @offset: offset within the logical eraseblock where to write
 * @len: how many bytes to write
 *
 * This function writes data to logical eraseblock @lnum of a dynamic volume
 * @vol. Returns zero in case of success and a negative error code in case
 * of failure. In case of error, it is possible that something was still
 * written to the flash media, but it may be garbage.
 */
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		      const void *buf, int offset, int len)
{
	int err, pnum, tries = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum >= 0) {
		dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
			len, offset, vol_id, lnum, pnum);

		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn("failed to write data to PEB %d", pnum);
			if (err == -EIO && ubi->bad_allowed)
				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
						  offset, len);
			if (err)
				ubi_ro_mode(ubi);
		}
		leb_write_unlock(ubi, vol_id, lnum);
		return err;
	}

	/*
	 * The logical eraseblock is not mapped. We have to get a free physical
	 * eraseblock and write the volume identifier header there first.
	 */
	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr) {
		leb_write_unlock(ubi, vol_id, lnum);
		return -ENOMEM;
	}

	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

retry:
	pnum = ubi_wl_get_peb(ubi);
	if (pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		leb_write_unlock(ubi, vol_id, lnum);
		return pnum;
	}

	dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto write_error;
	}

	if (len) {
		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
				 len, offset, vol_id, lnum, pnum);
			goto write_error;
		}
	}

	down_read(&ubi->fm_sem);
	vol->eba_tbl[lnum] = pnum;
	up_read(&ubi->fm_sem);

	leb_write_unlock(ubi, vol_id, lnum);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	/*
	 * Fortunately, this is the first write operation to this physical
	 * eraseblock, so just put it and request a new one. We assume that if
	 * this physical eraseblock went bad, the erase code will handle that.
	 */
	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ubi_msg("try another PEB");
	goto retry;
}
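
/*
 * Illustrative sketch (not built): writing to a dynamic volume. @offset and
 * @len are assumed to be aligned to @ubi->min_io_size; writing to an
 * unmapped LEB makes ubi_eba_write_leb() map it to a fresh PEB first.
 */
#if 0
	err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, ubi->min_io_size);
	if (err) {
		/* nothing, or possibly garbage, reached the flash */
	}
#endif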

/**
 * ubi_eba_write_leb_st - write data to static volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 * @used_ebs: how many logical eraseblocks will this volume contain
 *
 * This function writes data to logical eraseblock @lnum of static volume
 * @vol. The @used_ebs argument should contain the total number of logical
 * eraseblocks in this static volume.
 *
 * When writing to the last logical eraseblock, the @len argument doesn't have
 * to be aligned to the minimal I/O unit size. Instead, it has to be equivalent
 * to the real data size, although the @buf buffer has to be padded out to the
 * aligned length. In all other cases, @len has to be aligned.
 *
 * It is prohibited to write more than once to logical eraseblocks of static
 * volumes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
			 int lnum, const void *buf, int len, int used_ebs)
{
	int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (lnum == used_ebs - 1)
		/* If this is the last LEB @len may be unaligned */
		len = ALIGN(data_size, ubi->min_io_size);
	else
		ubi_assert(!(len & (ubi->min_io_size - 1)));

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, data_size);
	vid_hdr->vol_type = UBI_VID_STATIC;
	vid_hdr->data_size = cpu_to_be32(data_size);
	vid_hdr->used_ebs = cpu_to_be32(used_ebs);
	vid_hdr->data_crc = cpu_to_be32(crc);

retry:
	pnum = ubi_wl_get_peb(ubi);
	if (pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		leb_write_unlock(ubi, vol_id, lnum);
		return pnum;
	}

	dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
		len, vol_id, lnum, pnum, used_ebs);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto write_error;
	}

	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
	if (err) {
		ubi_warn("failed to write %d bytes of data to PEB %d",
			 len, pnum);
		goto write_error;
	}

	ubi_assert(vol->eba_tbl[lnum] < 0);
	down_read(&ubi->fm_sem);
	vol->eba_tbl[lnum] = pnum;
	up_read(&ubi->fm_sem);

	leb_write_unlock(ubi, vol_id, lnum);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		/*
		 * This flash device does not admit of bad eraseblocks or
		 * something nasty and unexpected happened. Switch to read-only
		 * mode just in case.
		 */
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ubi_msg("try another PEB");
	goto retry;
}
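
/*
 * Illustrative sketch (not built): the @len rule for the last LEB of a
 * static volume. @len carries the real data size while the buffer itself is
 * assumed to be padded (here with 0xFF) up to the min_io_size-aligned length
 * that ubi_eba_write_leb_st() computes internally with ALIGN().
 */
#if 0
	int aligned = ALIGN(data_size, ubi->min_io_size);

	memset(buf + data_size, 0xFF, aligned - data_size);
	err = ubi_eba_write_leb_st(ubi, vol, used_ebs - 1, buf, data_size,
				   used_ebs);
#endif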

/**
 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 *
 * This function changes the contents of a logical eraseblock atomically. @buf
 * has to contain new logical eraseblock data, and @len - the length of the
 * data, which has to be aligned. This function guarantees that in case of an
 * unclean reboot the old contents are preserved. Returns zero in case of
 * success and a negative error code in case of failure.
 *
 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
 */
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
			      int lnum, const void *buf, int len)
{
	int err, pnum, tries = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (len == 0) {
		/*
		 * Special case when data length is zero. In this case the LEB
		 * has to be unmapped and mapped somewhere else.
		 */
		err = ubi_eba_unmap_leb(ubi, vol, lnum);
		if (err)
			return err;
		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
	}

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->alc_mutex);
	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		goto out_mutex;

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, len);
	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->data_size = cpu_to_be32(len);
	vid_hdr->copy_flag = 1;
	vid_hdr->data_crc = cpu_to_be32(crc);

retry:
	pnum = ubi_wl_get_peb(ubi);
	if (pnum < 0) {
		err = pnum;
		goto out_leb_unlock;
	}

	dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
		vol_id, lnum, vol->eba_tbl[lnum], pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto write_error;
	}

	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
	if (err) {
		ubi_warn("failed to write %d bytes of data to PEB %d",
			 len, pnum);
		goto write_error;
	}

	if (vol->eba_tbl[lnum] >= 0) {
		err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0);
		if (err)
			goto out_leb_unlock;
	}

	down_read(&ubi->fm_sem);
	vol->eba_tbl[lnum] = pnum;
	up_read(&ubi->fm_sem);

out_leb_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
out_mutex:
	mutex_unlock(&ubi->alc_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return err;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		/*
		 * This flash device does not admit of bad eraseblocks or
		 * something nasty and unexpected happened. Switch to read-only
		 * mode just in case.
		 */
		ubi_ro_mode(ubi);
		goto out_leb_unlock;
	}

	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		goto out_leb_unlock;
	}

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ubi_msg("try another PEB");
	goto retry;
}

/**
 * is_error_sane - check whether a read error is sane.
 * @err: code of the error happened during reading
 *
 * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
 * cannot read data from the target PEB (an error @err happened). If the error
 * code is sane, then we treat this error as non-fatal. Otherwise the error is
 * fatal and UBI will be switched to R/O mode later.
 *
 * The idea is that we try not to switch to R/O mode if the read error is
 * something which suggests there was a real read problem. E.g., %-EIO. Or a
 * memory allocation failed (%-ENOMEM). Otherwise, it is safer to switch to R/O
 * mode, simply because we do not know what happened at the MTD level, and we
 * cannot handle this. E.g., the underlying driver may have become crazy, and
 * it is safer to switch to R/O mode to preserve the data.
 *
 * And bear in mind, this is about reading from the target PEB, i.e. the PEB
 * which we have just written.
 */
static int is_error_sane(int err)
{
	if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
	    err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
		return 0;
	return 1;
}

/**
 * ubi_eba_copy_leb - copy logical eraseblock.
 * @ubi: UBI device description object
 * @from: physical eraseblock number from where to copy
 * @to: physical eraseblock number where to copy
 * @vid_hdr: VID header of the @from physical eraseblock
 *
 * This function copies logical eraseblock from physical eraseblock @from to
 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
 * function. Returns:
 *   o %0 in case of success;
 *   o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
 *   o a negative error code in case of failure.
 */
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
		     struct ubi_vid_hdr *vid_hdr)
{
	int err, vol_id, lnum, data_size, aldata_size, idx;
	struct ubi_volume *vol;
	uint32_t crc;

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);

	if (vid_hdr->vol_type == UBI_VID_STATIC) {
		data_size = be32_to_cpu(vid_hdr->data_size);
		aldata_size = ALIGN(data_size, ubi->min_io_size);
	} else
		data_size = aldata_size =
			    ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);

	idx = vol_id2idx(ubi, vol_id);
	spin_lock(&ubi->volumes_lock);
	/*
	 * Note, we may race with volume deletion, which means that the volume
	 * this logical eraseblock belongs to might be being deleted. Since the
	 * volume deletion un-maps all the volume's logical eraseblocks, it will
	 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
	 */
	vol = ubi->volumes[idx];
	spin_unlock(&ubi->volumes_lock);
	if (!vol) {
		/* No need to do further work, cancel */
		dbg_wl("volume %d is being removed, cancel", vol_id);
		return MOVE_CANCEL_RACE;
	}

	/*
	 * We do not want anybody to write to this logical eraseblock while we
	 * are moving it, so lock it.
	 *
	 * Note, we are using non-waiting locking here, because we cannot sleep
	 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
	 * unmapping the LEB which is mapped to the PEB we are going to move
	 * (@from). This task locks the LEB and goes to sleep in the
	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
	 * holding @ubi->move_mutex and go to sleep on the LEB lock. So, if the
	 * LEB is already locked, we just do not move it and return
	 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
	 * we do not know the reasons for the contention - it may be just a
	 * normal I/O on this LEB, so we want to re-try.
	 */
	err = leb_write_trylock(ubi, vol_id, lnum);
	if (err) {
		dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
		return MOVE_RETRY;
	}

	/*
	 * The LEB might have been put meanwhile, and the task which put it is
	 * probably waiting on @ubi->move_mutex. No need to continue the work,
	 * cancel it.
	 */
	if (vol->eba_tbl[lnum] != from) {
		dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
		       vol_id, lnum, from, vol->eba_tbl[lnum]);
		err = MOVE_CANCEL_RACE;
		goto out_unlock_leb;
	}

	/*
	 * OK, now the LEB is locked and we can safely start moving it. Since
	 * this function utilizes the @ubi->peb_buf buffer which is shared
	 * with some other functions - we lock the buffer by taking the
	 * @ubi->buf_mutex.
	 */
	mutex_lock(&ubi->buf_mutex);
	dbg_wl("read %d bytes of data", aldata_size);
	err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
	if (err && err != UBI_IO_BITFLIPS) {
		ubi_warn("error %d while reading data from PEB %d",
			 err, from);
		err = MOVE_SOURCE_RD_ERR;
		goto out_unlock_buf;
	}

	/*
	 * Now we have got to calculate how much data we have to copy. In
	 * case of a static volume it is fairly easy - the VID header contains
	 * the data size. In case of a dynamic volume it is more difficult - we
	 * have to read the contents, cut 0xFF bytes from the end and copy only
	 * the first part. We must do this to avoid writing 0xFF bytes as it
	 * may have some side-effects. And not only this. It is important not
	 * to include those 0xFFs in the CRC because they may later be filled
	 * with data.
	 */
	if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
		aldata_size = data_size =
			ubi_calc_data_len(ubi, ubi->peb_buf, data_size);

	cond_resched();
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
	cond_resched();

	/*
	 * It may turn out that the whole @from physical eraseblock contains
	 * only 0xFF bytes. Then we have to only write the VID header and not
	 * write any data. This also means we should not set
	 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
	 */
	if (data_size > 0) {
		vid_hdr->copy_flag = 1;
		vid_hdr->data_size = cpu_to_be32(data_size);
		vid_hdr->data_crc = cpu_to_be32(crc);
	}
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));

	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
	if (err) {
		if (err == -EIO)
			err = MOVE_TARGET_WR_ERR;
		goto out_unlock_buf;
	}

	cond_resched();

	/* Read the VID header back and check if it was written correctly */
	err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
	if (err) {
		if (err != UBI_IO_BITFLIPS) {
			ubi_warn("error %d while reading VID header back from PEB %d",
				 err, to);
			if (is_error_sane(err))
				err = MOVE_TARGET_RD_ERR;
		} else
			err = MOVE_TARGET_BITFLIPS;
		goto out_unlock_buf;
	}

	if (data_size > 0) {
		err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
		if (err) {
			if (err == -EIO)
				err = MOVE_TARGET_WR_ERR;
			goto out_unlock_buf;
		}

		cond_resched();

		/*
		 * We've written the data and are going to read it back to make
		 * sure it was written correctly.
		 */
		memset(ubi->peb_buf, 0xFF, aldata_size);
		err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
		if (err) {
			if (err != UBI_IO_BITFLIPS) {
				ubi_warn("error %d while reading data back from PEB %d",
					 err, to);
				if (is_error_sane(err))
					err = MOVE_TARGET_RD_ERR;
			} else
				err = MOVE_TARGET_BITFLIPS;
			goto out_unlock_buf;
		}

		cond_resched();

		if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
			ubi_warn("read data back from PEB %d and it is different",
				 to);
			err = -EINVAL;
			goto out_unlock_buf;
		}
	}

	ubi_assert(vol->eba_tbl[lnum] == from);
	down_read(&ubi->fm_sem);
	vol->eba_tbl[lnum] = to;
	up_read(&ubi->fm_sem);

out_unlock_buf:
	mutex_unlock(&ubi->buf_mutex);
out_unlock_leb:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}
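
/*
 * Illustrative sketch (not built): how a caller might dispatch
 * ubi_eba_copy_leb() results. The MOVE_* codes are soft failures which the
 * wear-leveling code is expected to handle; only negative codes are fatal.
 */
#if 0
	err = ubi_eba_copy_leb(ubi, from, to, vid_hdr);
	if (err == MOVE_RETRY) {
		/* the LEB was busy - re-try the whole operation later */
	} else if (err < 0) {
		/* fatal error - UBI will likely switch to read-only mode */
	}
#endif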

/**
 * print_rsvd_warning - warn about not having enough reserved PEBs.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This is a helper function for 'ubi_eba_init()' which is called when UBI
 * cannot reserve enough PEBs for bad block handling. This function makes a
 * decision whether we have to print a warning or not. The algorithm is as
 * follows:
 *   o if this is a new UBI image, then just print the warning
 *   o if this is an UBI image which has already been used for some time,
 *     print a warning only if we can reserve less than 10% of the expected
 *     number of reserved PEBs.
 *
 * The idea is that when UBI is used, PEBs become bad, and the reserved pool
 * of PEBs becomes smaller, which is normal and we do not want to scare users
 * with a warning every time they attach the MTD device. This was an issue
 * reported by real users.
 */
static void print_rsvd_warning(struct ubi_device *ubi,
			       struct ubi_attach_info *ai)
{
	/*
	 * The 1 << 18 threshold is picked arbitrarily - it is just a
	 * reasonably large number to distinguish between newly flashed and
	 * used images.
	 */
	if (ai->max_sqnum > (1 << 18)) {
		int min = ubi->beb_rsvd_level / 10;

		if (!min)
			min = 1;
		if (ubi->beb_rsvd_pebs > min)
			return;
	}

	ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
		 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
	if (ubi->corr_peb_count)
		ubi_warn("%d PEBs are corrupted and not used",
			 ubi->corr_peb_count);
}

/**
 * self_check_eba - run a self check on the EBA table constructed by fastmap.
 * @ubi: UBI device description object
 * @ai_fastmap: UBI attach info object created by fastmap
 * @ai_scan: UBI attach info object created by scanning
 *
 * Returns < 0 in case of an internal error, 0 otherwise.
 * If a bad EBA table entry was found it will be printed out and
 * ubi_assert() is triggered.
 */
int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
		   struct ubi_attach_info *ai_scan)
{
	int i, j, num_volumes, ret = 0;
	int **scan_eba, **fm_eba;
	struct ubi_ainf_volume *av;
	struct ubi_volume *vol;
	struct ubi_ainf_peb *aeb;
	struct rb_node *rb;

	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
	if (!scan_eba)
		return -ENOMEM;

	fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
	if (!fm_eba) {
		kfree(scan_eba);
		return -ENOMEM;
	}

	for (i = 0; i < num_volumes; i++) {
		vol = ubi->volumes[i];
		if (!vol)
			continue;

		scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
				      GFP_KERNEL);
		if (!scan_eba[i]) {
			ret = -ENOMEM;
			goto out_free;
		}

		fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
				    GFP_KERNEL);
		if (!fm_eba[i]) {
			ret = -ENOMEM;
			goto out_free;
		}

		for (j = 0; j < vol->reserved_pebs; j++)
			scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;

		av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
			scan_eba[i][aeb->lnum] = aeb->pnum;

		av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
			fm_eba[i][aeb->lnum] = aeb->pnum;

		for (j = 0; j < vol->reserved_pebs; j++) {
			if (scan_eba[i][j] != fm_eba[i][j]) {
				if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
				    fm_eba[i][j] == UBI_LEB_UNMAPPED)
					continue;

				ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
					vol->vol_id, i, fm_eba[i][j],
					scan_eba[i][j]);
				ubi_assert(0);
			}
		}
	}

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;

		kfree(scan_eba[i]);
		kfree(fm_eba[i]);
	}

	kfree(scan_eba);
	kfree(fm_eba);
	return ret;
}

/**
 * ubi_eba_init - initialize the EBA sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int i, j, err, num_volumes;
	struct ubi_ainf_volume *av;
	struct ubi_volume *vol;
	struct ubi_ainf_peb *aeb;
	struct rb_node *rb;

	dbg_eba("initialize EBA sub-system");

	spin_lock_init(&ubi->ltree_lock);
	mutex_init(&ubi->alc_mutex);
	ubi->ltree = RB_ROOT;

	ubi->global_sqnum = ai->max_sqnum + 1;
	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	for (i = 0; i < num_volumes; i++) {
		vol = ubi->volumes[i];
		if (!vol)
			continue;

		cond_resched();

		vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
				       GFP_KERNEL);
		if (!vol->eba_tbl) {
			err = -ENOMEM;
			goto out_free;
		}

		for (j = 0; j < vol->reserved_pebs; j++)
			vol->eba_tbl[j] = UBI_LEB_UNMAPPED;

		av = ubi_find_av(ai, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
			if (aeb->lnum >= vol->reserved_pebs)
				/*
				 * This may happen in case of an unclean reboot
				 * during re-size.
				 */
				ubi_move_aeb_to_list(av, aeb, &ai->erase);
			vol->eba_tbl[aeb->lnum] = aeb->pnum;
		}
	}

	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, EBA_RESERVED_PEBS);
		if (ubi->corr_peb_count)
			ubi_err("%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= EBA_RESERVED_PEBS;
	ubi->rsvd_pebs += EBA_RESERVED_PEBS;

	if (ubi->bad_allowed) {
		ubi_calculate_reserved(ubi);

		if (ubi->avail_pebs < ubi->beb_rsvd_level) {
			/* Not enough free physical eraseblocks */
			ubi->beb_rsvd_pebs = ubi->avail_pebs;
			print_rsvd_warning(ubi, ai);
		} else
			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;

		ubi->avail_pebs -= ubi->beb_rsvd_pebs;
		ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
	}

	dbg_eba("EBA sub-system is initialized");
	return 0;

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;
		kfree(ubi->volumes[i]->eba_tbl);
		ubi->volumes[i]->eba_tbl = NULL;
	}
	return err;
}
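
/*
 * Illustrative sketch (not built): the PEB bookkeeping ubi_eba_init()
 * performs. One PEB is reserved for atomic LEB changes, then up to
 * @ubi->beb_rsvd_level PEBs for bad-PEB handling, capped by what is
 * actually available.
 */
#if 0
	ubi->avail_pebs -= EBA_RESERVED_PEBS;	/* atomic LEB change */
	ubi->rsvd_pebs += EBA_RESERVED_PEBS;
	ubi->beb_rsvd_pebs = min(ubi->avail_pebs, ubi->beb_rsvd_level);
	ubi->avail_pebs -= ubi->beb_rsvd_pebs;
	ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
#endif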