/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * SPDX-License-Identifier: GPL-2.0+
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * The UBI Eraseblock Association (EBA) sub-system.
 *
 * This sub-system is responsible for I/O to/from logical eraseblocks.
 *
 * Although in this implementation the EBA table is fully kept and managed in
 * RAM, which implies poor scalability, it might be (partially) maintained on
 * flash in future implementations.
 *
 * The EBA sub-system implements per-logical eraseblock locking. Before
 * accessing a logical eraseblock it is locked for reading or writing. The
 * per-logical eraseblock locking is implemented by means of the lock tree. The
 * lock tree is an RB-tree which refers to all the currently locked logical
 * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
 * They are indexed by (@vol_id, @lnum) pairs.
 *
 * EBA also maintains the global sequence counter which is incremented each
 * time a logical eraseblock is mapped to a physical eraseblock and it is
 * stored in the volume identifier header. This means that each VID header has
 * a unique sequence number. The sequence number is only increased, and we
 * assume 64 bits is enough to never overflow.
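 *
 * For illustration, a minimal sketch of the in-RAM EBA table (hypothetical
 * numbers, not taken from a real image): if LEB 0 of a volume is mapped to
 * PEB 4 and LEB 1 is unmapped, then:
 *
 *	vol->eba_tbl[0] == 4
 *	vol->eba_tbl[1] == UBI_LEB_UNMAPPED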
 */

#ifndef __UBOOT__
#include <linux/slab.h>
#include <linux/crc32.h>
#else
#include <ubi_uboot.h>
#endif

#include <linux/err.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for atomic LEB change operation */
#define EBA_RESERVED_PEBS 1

/**
 * ubi_next_sqnum - get next sequence number.
 * @ubi: UBI device description object
 *
 * This function returns the next sequence number to use, which is just the
 * current global sequence counter value. It also increases the global
 * sequence counter.
 */
unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
{
	unsigned long long sqnum;

	spin_lock(&ubi->ltree_lock);
	sqnum = ubi->global_sqnum++;
	spin_unlock(&ubi->ltree_lock);

	return sqnum;
}

/**
 * ubi_get_compat - get compatibility flags of a volume.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 *
 * This function returns compatibility flags for an internal volume. User
 * volumes have no compatibility flags, so %0 is returned.
 */
static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
{
	if (vol_id == UBI_LAYOUT_VOLUME_ID)
		return UBI_LAYOUT_VOLUME_COMPAT;
	return 0;
}

/**
 * ltree_lookup - look up the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
 * object if the logical eraseblock is locked and %NULL if it is not.
 * @ubi->ltree_lock has to be locked.
 */
static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
					    int lnum)
{
	struct rb_node *p;

	p = ubi->ltree.rb_node;
	while (p) {
		struct ubi_ltree_entry *le;

		le = rb_entry(p, struct ubi_ltree_entry, rb);

		if (vol_id < le->vol_id)
			p = p->rb_left;
		else if (vol_id > le->vol_id)
			p = p->rb_right;
		else {
			if (lnum < le->lnum)
				p = p->rb_left;
			else if (lnum > le->lnum)
				p = p->rb_right;
			else
				return le;
		}
	}

	return NULL;
}

/**
 * ltree_add_entry - add new entry to the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function adds a new entry for logical eraseblock (@vol_id, @lnum) to
 * the lock tree. If such an entry is already there, its usage counter is
 * increased. Returns a pointer to the lock tree entry or %-ENOMEM if memory
 * allocation failed.
 */
static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
					       int vol_id, int lnum)
{
	struct ubi_ltree_entry *le, *le1, *le_free;

	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
	if (!le)
		return ERR_PTR(-ENOMEM);

	le->users = 0;
	init_rwsem(&le->mutex);
	le->vol_id = vol_id;
	le->lnum = lnum;

	spin_lock(&ubi->ltree_lock);
	le1 = ltree_lookup(ubi, vol_id, lnum);

	if (le1) {
		/*
		 * This logical eraseblock is already locked. The newly
		 * allocated lock entry is not needed.
		 */
		le_free = le;
		le = le1;
	} else {
		struct rb_node **p, *parent = NULL;

		/*
		 * No lock entry, add the newly allocated one to the
		 * @ubi->ltree RB-tree.
		 */
		le_free = NULL;

		p = &ubi->ltree.rb_node;
		while (*p) {
			parent = *p;
			le1 = rb_entry(parent, struct ubi_ltree_entry, rb);

			if (vol_id < le1->vol_id)
				p = &(*p)->rb_left;
			else if (vol_id > le1->vol_id)
				p = &(*p)->rb_right;
			else {
				ubi_assert(lnum != le1->lnum);
				if (lnum < le1->lnum)
					p = &(*p)->rb_left;
				else
					p = &(*p)->rb_right;
			}
		}

		rb_link_node(&le->rb, parent, p);
		rb_insert_color(&le->rb, &ubi->ltree);
	}
	le->users += 1;
	spin_unlock(&ubi->ltree_lock);

	kfree(le_free);
	return le;
}

/**
 * leb_read_lock - lock logical eraseblock for reading.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for reading. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_read(&le->mutex);
	return 0;
}

/**
 * leb_read_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	up_read(&le->mutex);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);
}

/**
 * leb_write_lock - lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_write(&le->mutex);
	return 0;
}

/**
 * leb_write_trylock - try to lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing if there is no
 * contention and does nothing if there is contention. Returns %0 in case of
 * success, %1 in case of contention, and a negative error code in case of
 * failure.
 */
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	if (down_write_trylock(&le->mutex))
		return 0;

	/* Contention, cancel */
	spin_lock(&ubi->ltree_lock);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);

	return 1;
}

/**
 * leb_write_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */
static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	up_write(&le->mutex);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		kfree(le);
	}
	spin_unlock(&ubi->ltree_lock);
}

/**
 * ubi_eba_unmap_leb - un-map logical eraseblock.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 *
 * This function un-maps logical eraseblock @lnum and schedules the
 * corresponding physical eraseblock for erasure. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
		      int lnum)
{
	int err, pnum, vol_id = vol->vol_id;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum < 0)
		/* This logical eraseblock is already unmapped */
		goto out_unlock;

	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);

	down_read(&ubi->fm_eba_sem);
	vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
	up_read(&ubi->fm_eba_sem);
	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);

out_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}

/**
 * ubi_eba_read_leb - read data.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: buffer to store the read data
 * @offset: offset from where to read
 * @len: how many bytes to read
 * @check: data CRC check flag
 *
 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
 * bytes. The @check flag only makes sense for static volumes and forces
 * eraseblock data CRC checking.
 *
 * In case of success this function returns zero. In case of a static volume,
 * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
 * returned for any volume type if an ECC error was detected by the MTD device
 * driver. Other negative error codes may be returned in case of other errors.
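 *
 * A minimal usage sketch (hypothetical caller, error handling elided): read
 * one whole LEB of a dynamic volume into a buffer, without CRC checking:
 *
 *	err = ubi_eba_read_leb(ubi, vol, lnum, buf, 0,
 *			       vol->usable_leb_size, 0);
 *	if (err)
 *		return err;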
 */
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		     void *buf, int offset, int len, int check)
{
	int err, pnum, scrub = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t uninitialized_var(crc);

	err = leb_read_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum < 0) {
		/*
		 * The logical eraseblock is not mapped, fill the whole buffer
		 * with 0xFF bytes. The exception is static volumes for which
		 * it is an error to read unmapped logical eraseblocks.
		 */
		dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
			len, offset, vol_id, lnum);
		leb_read_unlock(ubi, vol_id, lnum);
		ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
		memset(buf, 0xFF, len);
		return 0;
	}

	dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	if (vol->vol_type == UBI_DYNAMIC_VOLUME)
		check = 0;

retry:
	if (check) {
		vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
		if (!vid_hdr) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
		if (err && err != UBI_IO_BITFLIPS) {
			if (err > 0) {
				/*
				 * The header is either absent or corrupted.
				 * The former case means there is a bug -
				 * switch to read-only mode just in case.
				 * The latter case means a real corruption - we
				 * may try to recover data. FIXME: but this is
				 * not implemented.
				 */
				if (err == UBI_IO_BAD_HDR_EBADMSG ||
				    err == UBI_IO_BAD_HDR) {
					ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
						 pnum, vol_id, lnum);
					err = -EBADMSG;
				} else {
					err = -EINVAL;
					ubi_ro_mode(ubi);
				}
			}
			goto out_free;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
		ubi_assert(len == be32_to_cpu(vid_hdr->data_size));

		crc = be32_to_cpu(vid_hdr->data_crc);
		ubi_free_vid_hdr(ubi, vid_hdr);
	}

	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
	if (err) {
		if (err == UBI_IO_BITFLIPS)
			scrub = 1;
		else if (mtd_is_eccerr(err)) {
			if (vol->vol_type == UBI_DYNAMIC_VOLUME)
				goto out_unlock;
			scrub = 1;
			if (!check) {
				ubi_msg(ubi, "force data checking");
				check = 1;
				goto retry;
			}
		} else
			goto out_unlock;
	}

	if (check) {
		uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);

		if (crc1 != crc) {
			ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
				 crc1, crc);
			err = -EBADMSG;
			goto out_unlock;
		}
	}

	if (scrub)
		err = ubi_wl_scrub_peb(ubi, pnum);

	leb_read_unlock(ubi, vol_id, lnum);
	return err;

out_free:
	ubi_free_vid_hdr(ubi, vid_hdr);
out_unlock:
	leb_read_unlock(ubi, vol_id, lnum);
	return err;
}

#ifndef __UBOOT__
/**
 * ubi_eba_read_leb_sg - read data into a scatter gather list.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @sgl: UBI scatter gather list to store the read data
 * @lnum: logical eraseblock number
 * @offset: offset from where to read
 * @len: how many bytes to read
 * @check: data CRC check flag
 *
 * This function works exactly like ubi_eba_read_leb(), but instead of
 * storing the read data in a buffer it writes it to a UBI scatter gather
 * list.
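 *
 * A minimal usage sketch (hypothetical caller; assumes @sgl->sg[] has
 * already been populated with enough pages for @len bytes and the positions
 * have been reset, e.g. via the ubi_sgl_init() helper):
 *
 *	ubi_sgl_init(sgl);
 *	err = ubi_eba_read_leb_sg(ubi, vol, sgl, lnum, 0, len, 0);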
 */
int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
			struct ubi_sgl *sgl, int lnum, int offset, int len,
			int check)
{
	int to_read;
	int ret;
	struct scatterlist *sg;

	for (;;) {
		ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
		sg = &sgl->sg[sgl->list_pos];
		if (len < sg->length - sgl->page_pos)
			to_read = len;
		else
			to_read = sg->length - sgl->page_pos;

		ret = ubi_eba_read_leb(ubi, vol, lnum,
				       sg_virt(sg) + sgl->page_pos, offset,
				       to_read, check);
		if (ret < 0)
			return ret;

		offset += to_read;
		len -= to_read;
		if (!len) {
			sgl->page_pos += to_read;
			if (sgl->page_pos == sg->length) {
				sgl->list_pos++;
				sgl->page_pos = 0;
			}

			break;
		}

		sgl->list_pos++;
		sgl->page_pos = 0;
	}

	return ret;
}
#endif

/**
 * recover_peb - recover from write failure.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to recover
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 * @buf: data which was not written because of the write failure
 * @offset: offset of the failed write
 * @len: how many bytes should have been written
 *
 * This function is called in case of a write failure and moves all good data
 * from the potentially bad physical eraseblock to a good physical eraseblock.
 * This function also writes the data which was not written due to the failure.
 * Returns new physical eraseblock number in case of success, and a negative
 * error code in case of failure.
 */
static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
		       const void *buf, int offset, int len)
{
	int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
	struct ubi_volume *vol = ubi->volumes[idx];
	struct ubi_vid_hdr *vid_hdr;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

retry:
	new_pnum = ubi_wl_get_peb(ubi);
	if (new_pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		up_read(&ubi->fm_eba_sem);
		return new_pnum;
	}

	ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
		pnum, new_pnum);

	err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err > 0)
			err = -EIO;
		up_read(&ubi->fm_eba_sem);
		goto out_put;
	}

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
	if (err) {
		up_read(&ubi->fm_eba_sem);
		goto write_error;
	}

	data_size = offset + len;
	mutex_lock(&ubi->buf_mutex);
	memset(ubi->peb_buf + offset, 0xFF, len);

	/* Read everything before the area where the write failure happened */
	if (offset > 0) {
		err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
		if (err && err != UBI_IO_BITFLIPS) {
			up_read(&ubi->fm_eba_sem);
			goto out_unlock;
		}
	}

	memcpy(ubi->peb_buf + offset, buf, len);

	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
	if (err) {
		mutex_unlock(&ubi->buf_mutex);
		up_read(&ubi->fm_eba_sem);
		goto write_error;
	}

	mutex_unlock(&ubi->buf_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);

	vol->eba_tbl[lnum] = new_pnum;
	up_read(&ubi->fm_eba_sem);
	ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);

	ubi_msg(ubi, "data was successfully recovered");
	return 0;

out_unlock:
	mutex_unlock(&ubi->buf_mutex);
out_put:
	ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return err;

write_error:
	/*
	 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
	 * get another one.
	 */
	ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
	ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
	if (++tries > UBI_IO_RETRIES) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}
	ubi_msg(ubi, "try again");
	goto retry;
}

/**
 * ubi_eba_write_leb - write data to dynamic volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: the data to write
 * @offset: offset within the logical eraseblock where to write
 * @len: how many bytes to write
 *
 * This function writes data to logical eraseblock @lnum of a dynamic volume
 * @vol. Returns zero in case of success and a negative error code in case
 * of failure. In case of error, it is possible that something was still
 * written to the flash media, but it may be garbage.
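 *
 * A minimal usage sketch (hypothetical caller; @buf is assumed to hold @len
 * bytes, with @offset and @len aligned as the volume requires):
 *
 *	err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len);
 *	if (err)
 *		ubi_warn(ubi, "write failed: %d", err);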
 */
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		      const void *buf, int offset, int len)
{
	int err, pnum, tries = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum >= 0) {
		dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
			len, offset, vol_id, lnum, pnum);

		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn(ubi, "failed to write data to PEB %d", pnum);
			if (err == -EIO && ubi->bad_allowed)
				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
						  offset, len);
			if (err)
				ubi_ro_mode(ubi);
		}
		leb_write_unlock(ubi, vol_id, lnum);
		return err;
	}

	/*
	 * The logical eraseblock is not mapped. We have to get a free physical
	 * eraseblock and write the volume identifier header there first.
	 */
	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr) {
		leb_write_unlock(ubi, vol_id, lnum);
		return -ENOMEM;
	}

	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

retry:
	pnum = ubi_wl_get_peb(ubi);
	if (pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		leb_write_unlock(ubi, vol_id, lnum);
		up_read(&ubi->fm_eba_sem);
		return pnum;
	}

	dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		up_read(&ubi->fm_eba_sem);
		goto write_error;
	}

	if (len) {
		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
				 len, offset, vol_id, lnum, pnum);
			up_read(&ubi->fm_eba_sem);
			goto write_error;
		}
	}

	vol->eba_tbl[lnum] = pnum;
	up_read(&ubi->fm_eba_sem);

	leb_write_unlock(ubi, vol_id, lnum);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	/*
	 * Fortunately, this is the first write operation to this physical
	 * eraseblock, so just put it and request a new one. We assume that if
	 * this physical eraseblock went bad, the erase code will handle that.
	 */
	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ubi_msg(ubi, "try another PEB");
	goto retry;
}

/**
 * ubi_eba_write_leb_st - write data to static volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 * @used_ebs: how many logical eraseblocks will this volume contain
 *
 * This function writes data to logical eraseblock @lnum of static volume
 * @vol. The @used_ebs argument should contain the total number of logical
 * eraseblocks in this static volume.
 *
 * When writing to the last logical eraseblock, the @len argument doesn't have
 * to be aligned to the minimal I/O unit size. Instead, it has to be equal to
 * the real data size, although the @buf buffer still has to contain the
 * alignment padding. In all other cases, @len has to be aligned.
 *
 * It is prohibited to write more than once to logical eraseblocks of static
 * volumes. This function returns zero in case of success and a negative error
 * code in case of failure.
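 *
 * A minimal usage sketch (hypothetical caller): write the last LEB of a
 * static volume spanning @used_ebs eraseblocks, with @data_len bytes of
 * payload in a buffer padded to the alignment boundary:
 *
 *	err = ubi_eba_write_leb_st(ubi, vol, used_ebs - 1, buf, data_len,
 *				   used_ebs);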
 */
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
			 int lnum, const void *buf, int len, int used_ebs)
{
	int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (lnum == used_ebs - 1)
		/* If this is the last LEB @len may be unaligned */
		len = ALIGN(data_size, ubi->min_io_size);
	else
		ubi_assert(!(len & (ubi->min_io_size - 1)));

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, data_size);
	vid_hdr->vol_type = UBI_VID_STATIC;
	vid_hdr->data_size = cpu_to_be32(data_size);
	vid_hdr->used_ebs = cpu_to_be32(used_ebs);
	vid_hdr->data_crc = cpu_to_be32(crc);

retry:
	pnum = ubi_wl_get_peb(ubi);
	if (pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		leb_write_unlock(ubi, vol_id, lnum);
		up_read(&ubi->fm_eba_sem);
		return pnum;
	}

	dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
		len, vol_id, lnum, pnum, used_ebs);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		up_read(&ubi->fm_eba_sem);
		goto write_error;
	}

	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
	if (err) {
		ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
			 len, pnum);
		up_read(&ubi->fm_eba_sem);
		goto write_error;
	}

	ubi_assert(vol->eba_tbl[lnum] < 0);
	vol->eba_tbl[lnum] = pnum;
	up_read(&ubi->fm_eba_sem);

	leb_write_unlock(ubi, vol_id, lnum);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		/*
		 * This flash device does not admit of bad eraseblocks or
		 * something nasty and unexpected happened. Switch to read-only
		 * mode just in case.
		 */
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ubi_msg(ubi, "try another PEB");
	goto retry;
}

/**
 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 *
 * This function changes the contents of a logical eraseblock atomically. @buf
 * has to contain new logical eraseblock data, and @len - the length of the
 * data, which has to be aligned. This function guarantees that in case of an
 * unclean reboot the old contents are preserved. Returns zero in case of
 * success and a negative error code in case of failure.
 *
 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
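 *
 * A minimal usage sketch (hypothetical caller): atomically replace the
 * contents of LEB @lnum with @len aligned bytes from @buf:
 *
 *	err = ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
 *	if (err)
 *		return err;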
 */
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
			      int lnum, const void *buf, int len)
{
	int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (len == 0) {
		/*
		 * Special case when data length is zero. In this case the LEB
		 * has to be unmapped and mapped somewhere else.
		 */
		err = ubi_eba_unmap_leb(ubi, vol, lnum);
		if (err)
			return err;
		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
	}

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->alc_mutex);
	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		goto out_mutex;

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, len);
	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->data_size = cpu_to_be32(len);
	vid_hdr->copy_flag = 1;
	vid_hdr->data_crc = cpu_to_be32(crc);

retry:
	pnum = ubi_wl_get_peb(ubi);
	if (pnum < 0) {
		err = pnum;
		up_read(&ubi->fm_eba_sem);
		goto out_leb_unlock;
	}

	dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
		vol_id, lnum, vol->eba_tbl[lnum], pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		up_read(&ubi->fm_eba_sem);
		goto write_error;
	}

	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
	if (err) {
		ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
			 len, pnum);
		up_read(&ubi->fm_eba_sem);
		goto write_error;
	}

	old_pnum = vol->eba_tbl[lnum];
	vol->eba_tbl[lnum] = pnum;
	up_read(&ubi->fm_eba_sem);

	if (old_pnum >= 0) {
		err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0);
		if (err)
			goto out_leb_unlock;
	}

out_leb_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
out_mutex:
	mutex_unlock(&ubi->alc_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return err;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		/*
		 * This flash device does not admit of bad eraseblocks or
		 * something nasty and unexpected happened. Switch to read-only
		 * mode just in case.
		 */
		ubi_ro_mode(ubi);
		goto out_leb_unlock;
	}

	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		goto out_leb_unlock;
	}

	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ubi_msg(ubi, "try another PEB");
	goto retry;
}

/**
 * is_error_sane - check whether a read error is sane.
 * @err: code of the error that happened during reading
 *
 * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
 * cannot read data from the target PEB (an error @err happened). If the error
 * code is sane, then we treat this error as non-fatal. Otherwise the error is
 * fatal and UBI will be switched to R/O mode later.
 *
 * The idea is that we try not to switch to R/O mode if the read error is
 * something which suggests there was a real read problem. E.g., %-EIO. Or a
 * memory allocation failed (%-ENOMEM). Otherwise, it is safer to switch to
 * R/O mode, simply because we do not know what happened at the MTD level,
 * and we cannot handle this. E.g., the underlying driver may have become
 * crazy, and it is safer to switch to R/O mode to preserve the data.
 *
 * And bear in mind, this is about reading from the target PEB, i.e. the PEB
 * which we have just written.
 */
static int is_error_sane(int err)
{
	if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
	    err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
		return 0;
	return 1;
}

/**
 * ubi_eba_copy_leb - copy logical eraseblock.
 * @ubi: UBI device description object
 * @from: physical eraseblock number from where to copy
 * @to: physical eraseblock number where to copy
 * @vid_hdr: VID header of the @from physical eraseblock
 *
 * This function copies logical eraseblock from physical eraseblock @from to
 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
 * function. Returns:
 * o %0 in case of success;
 * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
 * o a negative error code in case of failure.
 */
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
		     struct ubi_vid_hdr *vid_hdr)
{
	int err, vol_id, lnum, data_size, aldata_size, idx;
	struct ubi_volume *vol;
	uint32_t crc;

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);

	if (vid_hdr->vol_type == UBI_VID_STATIC) {
		data_size = be32_to_cpu(vid_hdr->data_size);
		aldata_size = ALIGN(data_size, ubi->min_io_size);
	} else
		data_size = aldata_size =
			    ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);

	idx = vol_id2idx(ubi, vol_id);
	spin_lock(&ubi->volumes_lock);
	/*
	 * Note, we may race with volume deletion, which means that the volume
	 * this logical eraseblock belongs to might be being deleted. Since the
	 * volume deletion un-maps all the volume's logical eraseblocks, it
	 * will be locked in 'ubi_wl_put_peb()' and wait for the WL worker to
	 * finish.
	 */
	vol = ubi->volumes[idx];
	spin_unlock(&ubi->volumes_lock);
	if (!vol) {
		/* No need to do further work, cancel */
		dbg_wl("volume %d is being removed, cancel", vol_id);
		return MOVE_CANCEL_RACE;
	}

	/*
	 * We do not want anybody to write to this logical eraseblock while we
	 * are moving it, so lock it.
	 *
	 * Note, we are using non-waiting locking here, because we cannot sleep
	 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
	 * unmapping the LEB which is mapped to the PEB we are going to move
	 * (@from). This task locks the LEB and goes sleep in the
	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
	 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
	 * LEB is already locked, we just do not move it and return
	 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
	 * we do not know the reasons of the contention - it may be just a
	 * normal I/O on this LEB, so we want to re-try.
	 */
	err = leb_write_trylock(ubi, vol_id, lnum);
	if (err) {
		dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
		return MOVE_RETRY;
	}

	/*
	 * The LEB might have been put meanwhile, and the task which put it is
	 * probably waiting on @ubi->move_mutex. No need to continue the work,
	 * cancel it.
	 */
	if (vol->eba_tbl[lnum] != from) {
		dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
		       vol_id, lnum, from, vol->eba_tbl[lnum]);
		err = MOVE_CANCEL_RACE;
		goto out_unlock_leb;
	}

	/*
	 * OK, now the LEB is locked and we can safely start moving it. Since
	 * this function utilizes the @ubi->peb_buf buffer which is shared
	 * with some other functions - we lock the buffer by taking the
	 * @ubi->buf_mutex.
	 */
	mutex_lock(&ubi->buf_mutex);
	dbg_wl("read %d bytes of data", aldata_size);
	err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
	if (err && err != UBI_IO_BITFLIPS) {
		ubi_warn(ubi, "error %d while reading data from PEB %d",
			 err, from);
		err = MOVE_SOURCE_RD_ERR;
		goto out_unlock_buf;
	}

	/*
	 * Now we have got to calculate how much data we have to copy. In
	 * case of a static volume it is fairly easy - the VID header contains
	 * the data size. In case of a dynamic volume it is more difficult - we
	 * have to read the contents, cut 0xFF bytes from the end and copy only
	 * the first part. We must do this to avoid writing 0xFF bytes as it
	 * may have some side-effects. And not only this. It is important not
	 * to include those 0xFFs in the CRC because they may later be filled
	 * by data.
	 */
	if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
		aldata_size = data_size =
			ubi_calc_data_len(ubi, ubi->peb_buf, data_size);

	cond_resched();
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
	cond_resched();

	/*
	 * It may turn out that the whole @from physical eraseblock contains
	 * only 0xFF bytes. Then we have to only write the VID header and not
	 * write any data. This also means we should not set
	 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
	 */
	if (data_size > 0) {
		vid_hdr->copy_flag = 1;
		vid_hdr->data_size = cpu_to_be32(data_size);
		vid_hdr->data_crc = cpu_to_be32(crc);
	}
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));

	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
	if (err) {
		if (err == -EIO)
			err = MOVE_TARGET_WR_ERR;
		goto out_unlock_buf;
	}

	cond_resched();

	/* Read the VID header back and check if it was written correctly */
	err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
	if (err) {
		if (err != UBI_IO_BITFLIPS) {
			ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
				 err, to);
			if (is_error_sane(err))
				err = MOVE_TARGET_RD_ERR;
		} else
			err = MOVE_TARGET_BITFLIPS;
		goto out_unlock_buf;
	}

	if (data_size > 0) {
		err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
		if (err) {
			if (err == -EIO)
				err = MOVE_TARGET_WR_ERR;
			goto out_unlock_buf;
		}

		cond_resched();

		/*
		 * We've written the data and are going to read it back to make
		 * sure it was written correctly.
		 */
		memset(ubi->peb_buf, 0xFF, aldata_size);
		err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
		if (err) {
			if (err != UBI_IO_BITFLIPS) {
				ubi_warn(ubi, "error %d while reading data back from PEB %d",
					 err, to);
				if (is_error_sane(err))
					err = MOVE_TARGET_RD_ERR;
			} else
				err = MOVE_TARGET_BITFLIPS;
			goto out_unlock_buf;
		}

		cond_resched();

		if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
			ubi_warn(ubi, "read data back from PEB %d and it is different",
				 to);
			err = -EINVAL;
			goto out_unlock_buf;
		}
	}

	ubi_assert(vol->eba_tbl[lnum] == from);
	down_read(&ubi->fm_eba_sem);
	vol->eba_tbl[lnum] = to;
	up_read(&ubi->fm_eba_sem);

out_unlock_buf:
	mutex_unlock(&ubi->buf_mutex);
out_unlock_leb:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}

/**
 * print_rsvd_warning - warn about not having enough reserved PEBs.
 * @ubi: UBI device description object
 * @ai: UBI attach info object
 *
 * This is a helper function for 'ubi_eba_init()' which is called when UBI
 * cannot reserve enough PEBs for bad block handling. This function makes a
 * decision whether we have to print a warning or not. The algorithm is as
 * follows:
 * o if this is a new UBI image, then just print the warning
 * o if this is a UBI image which has already been used for some time, print
 *   a warning only if we can reserve less than 10% of the expected number
 *   of reserved PEBs.
 *
 * The idea is that when UBI is used, PEBs become bad, and the reserved pool
 * of PEBs becomes smaller, which is normal and we do not want to scare users
 * with a warning every time they attach the MTD device. This was an issue
 * reported by real users.
 */
static void print_rsvd_warning(struct ubi_device *ubi,
			       struct ubi_attach_info *ai)
{
	/*
	 * The 1 << 18 number is picked randomly - it is just a reasonably
	 * large number to distinguish between newly flashed and used images.
	 */
	if (ai->max_sqnum > (1 << 18)) {
		int min = ubi->beb_rsvd_level / 10;

		if (!min)
			min = 1;
		if (ubi->beb_rsvd_pebs > min)
			return;
	}

	ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
		 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
	if (ubi->corr_peb_count)
		ubi_warn(ubi, "%d PEBs are corrupted and not used",
			 ubi->corr_peb_count);
}

/**
 * self_check_eba - run a self check on the EBA table constructed by fastmap.
 * @ubi: UBI device description object
 * @ai_fastmap: UBI attach info object created by fastmap
 * @ai_scan: UBI attach info object created by scanning
 *
 * Returns < 0 in case of an internal error, 0 otherwise.
 * If a bad EBA table entry was found it will be printed out and
 * ubi_assert() is triggered.
 */
int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
		   struct ubi_attach_info *ai_scan)
{
	int i, j, num_volumes, ret = 0;
	int **scan_eba, **fm_eba;
	struct ubi_ainf_volume *av;
	struct ubi_volume *vol;
	struct ubi_ainf_peb *aeb;
	struct rb_node *rb;

	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
	if (!scan_eba)
		return -ENOMEM;

	fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
	if (!fm_eba) {
		kfree(scan_eba);
		return -ENOMEM;
	}

	for (i = 0; i < num_volumes; i++) {
		vol = ubi->volumes[i];
		if (!vol)
			continue;

		scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
				      GFP_KERNEL);
		if (!scan_eba[i]) {
			ret = -ENOMEM;
			goto out_free;
		}

		fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
				    GFP_KERNEL);
		if (!fm_eba[i]) {
			ret = -ENOMEM;
			goto out_free;
		}

		for (j = 0; j < vol->reserved_pebs; j++)
			scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;

		av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
			scan_eba[i][aeb->lnum] = aeb->pnum;

		av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
			fm_eba[i][aeb->lnum] = aeb->pnum;

		for (j = 0; j < vol->reserved_pebs; j++) {
			if (scan_eba[i][j] != fm_eba[i][j]) {
				if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
				    fm_eba[i][j] == UBI_LEB_UNMAPPED)
					continue;

				ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
					vol->vol_id, j, fm_eba[i][j],
					scan_eba[i][j]);
				ubi_assert(0);
			}
		}
	}

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;

		kfree(scan_eba[i]);
		kfree(fm_eba[i]);
	}

	kfree(scan_eba);
	kfree(fm_eba);
	return ret;
}

/**
 * ubi_eba_init - initialize the EBA sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
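 *
 * A minimal sketch of the expected call site (hypothetical, including the
 * error label; in practice this is invoked once from the attach path after
 * @ai has been built):
 *
 *	err = ubi_eba_init(ubi, ai);
 *	if (err)
 *		goto out_free_attach_info;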
 */
int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int i, j, err, num_volumes;
	struct ubi_ainf_volume *av;
	struct ubi_volume *vol;
	struct ubi_ainf_peb *aeb;
	struct rb_node *rb;

	dbg_eba("initialize EBA sub-system");

	spin_lock_init(&ubi->ltree_lock);
	mutex_init(&ubi->alc_mutex);
	ubi->ltree = RB_ROOT;

	ubi->global_sqnum = ai->max_sqnum + 1;
	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	for (i = 0; i < num_volumes; i++) {
		vol = ubi->volumes[i];
		if (!vol)
			continue;

		cond_resched();

		vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
				       GFP_KERNEL);
		if (!vol->eba_tbl) {
			err = -ENOMEM;
			goto out_free;
		}

		for (j = 0; j < vol->reserved_pebs; j++)
			vol->eba_tbl[j] = UBI_LEB_UNMAPPED;

		av = ubi_find_av(ai, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
			if (aeb->lnum >= vol->reserved_pebs)
				/*
				 * This may happen in case of an unclean reboot
				 * during re-size.
				 */
				ubi_move_aeb_to_list(av, aeb, &ai->erase);
			else
				vol->eba_tbl[aeb->lnum] = aeb->pnum;
		}
	}

	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, EBA_RESERVED_PEBS);
		if (ubi->corr_peb_count)
			ubi_err(ubi, "%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= EBA_RESERVED_PEBS;
	ubi->rsvd_pebs += EBA_RESERVED_PEBS;

	if (ubi->bad_allowed) {
		ubi_calculate_reserved(ubi);

		if (ubi->avail_pebs < ubi->beb_rsvd_level) {
			/* Not enough free physical eraseblocks */
			ubi->beb_rsvd_pebs = ubi->avail_pebs;
			print_rsvd_warning(ubi, ai);
		} else
			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;

		ubi->avail_pebs -= ubi->beb_rsvd_pebs;
		ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
	}

	dbg_eba("EBA sub-system is initialized");
	return 0;

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;
		kfree(ubi->volumes[i]->eba_tbl);
		ubi->volumes[i]->eba_tbl = NULL;
	}
	return err;
}