/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * The UBI Eraseblock Association (EBA) unit.
 *
 * This unit is responsible for I/O to/from logical eraseblocks.
 *
 * Although in this implementation the EBA table is fully kept and managed in
 * RAM, which implies poor scalability, it might be (partially) maintained on
 * flash in future implementations.
 *
 * The EBA unit implements per-logical eraseblock locking. Before accessing a
 * logical eraseblock it is locked for reading or writing. The per-logical
 * eraseblock locking is implemented by means of the lock tree. The lock tree
 * is an RB-tree which refers to all the currently locked logical eraseblocks.
 * The lock tree elements are &struct ubi_ltree_entry objects. They are
 * indexed by (@vol_id, @lnum) pairs.
 *
 * EBA also maintains the global sequence counter which is incremented each
 * time a logical eraseblock is mapped to a physical eraseblock and it is
 * stored in the volume identifier header. This means that each VID header has
 * a unique sequence number. The sequence number is only increased and we
 * assume 64 bits is enough to never overflow.
 */

#ifdef UBI_LINUX
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/err.h>
#endif

#include <ubi_uboot.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for atomic LEB change operation */
#define EBA_RESERVED_PEBS 1

/**
 * next_sqnum - get next sequence number.
 * @ubi: UBI device description object
 *
 * This function returns the next sequence number to use, which is just the
 * current global sequence counter value. It also increases the global
 * sequence counter.
 */
static unsigned long long next_sqnum(struct ubi_device *ubi)
{
	unsigned long long sqnum;

	spin_lock(&ubi->ltree_lock);
	sqnum = ubi->global_sqnum++;
	spin_unlock(&ubi->ltree_lock);

	return sqnum;
}

/**
 * ubi_get_compat - get compatibility flags of a volume.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 *
 * This function returns compatibility flags for an internal volume. User
 * volumes have no compatibility flags, so %0 is returned.
 */
static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
{
	if (vol_id == UBI_LAYOUT_VOLUME_ID)
		return UBI_LAYOUT_VOLUME_COMPAT;
	return 0;
}

/**
 * ltree_lookup - look up the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
 * object if the logical eraseblock is locked and %NULL if it is not.
 * @ubi->ltree_lock has to be locked.
 */
static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
					    int lnum)
{
	struct rb_node *p;

	p = ubi->ltree.rb_node;
	while (p) {
		struct ubi_ltree_entry *le;

		le = rb_entry(p, struct ubi_ltree_entry, rb);

		if (vol_id < le->vol_id)
			p = p->rb_left;
		else if (vol_id > le->vol_id)
			p = p->rb_right;
		else {
			if (lnum < le->lnum)
				p = p->rb_left;
			else if (lnum > le->lnum)
				p = p->rb_right;
			else
				return le;
		}
	}

	return NULL;
}
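/*
 * Illustrative sketch (not compiled in): how a caller could test whether LEB
 * (@vol_id, @lnum) is currently locked by consulting the lock tree. Only
 * 'ltree_lookup()' and @ubi->ltree_lock come from this file; the
 * 'leb_is_locked' name is hypothetical.
 */
#if 0
static int leb_is_locked(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	/* The lookup must be done under @ubi->ltree_lock */
	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	spin_unlock(&ubi->ltree_lock);

	return le != NULL;
}
#endif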
/**
 * ltree_add_entry - add new entry to the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function adds a new entry for logical eraseblock (@vol_id, @lnum) to
 * the lock tree. If such an entry is already there, its usage counter is
 * increased. Returns a pointer to the lock tree entry or %-ENOMEM if memory
 * allocation failed.
 */
static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
					       int vol_id, int lnum)
{
	struct ubi_ltree_entry *le, *le1, *le_free;

	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
	if (!le)
		return ERR_PTR(-ENOMEM);

	le->users = 0;
	init_rwsem(&le->mutex);
	le->vol_id = vol_id;
	le->lnum = lnum;

	spin_lock(&ubi->ltree_lock);
	le1 = ltree_lookup(ubi, vol_id, lnum);

	if (le1) {
		/*
		 * This logical eraseblock is already locked. The newly
		 * allocated lock entry is not needed.
		 */
		le_free = le;
		le = le1;
	} else {
		struct rb_node **p, *parent = NULL;

		/*
		 * No lock entry, add the newly allocated one to the
		 * @ubi->ltree RB-tree.
		 */
		le_free = NULL;

		p = &ubi->ltree.rb_node;
		while (*p) {
			parent = *p;
			le1 = rb_entry(parent, struct ubi_ltree_entry, rb);

			if (vol_id < le1->vol_id)
				p = &(*p)->rb_left;
			else if (vol_id > le1->vol_id)
				p = &(*p)->rb_right;
			else {
				ubi_assert(lnum != le1->lnum);
				if (lnum < le1->lnum)
					p = &(*p)->rb_left;
				else
					p = &(*p)->rb_right;
			}
		}

		rb_link_node(&le->rb, parent, p);
		rb_insert_color(&le->rb, &ubi->ltree);
	}
	le->users += 1;
	spin_unlock(&ubi->ltree_lock);

	kfree(le_free);

	return le;
}

/**
 * leb_read_lock - lock logical eraseblock for reading.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for reading. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_read(&le->mutex);
	return 0;
}

/**
 * leb_read_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	int _free = 0;
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		_free = 1;
	}
	spin_unlock(&ubi->ltree_lock);

	up_read(&le->mutex);
	if (_free)
		kfree(le);
}
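/*
 * Illustrative sketch (not compiled in): the canonical bracketing for read
 * access to a LEB, as used by 'ubi_eba_read_leb()' below. The
 * 'read_locked_op' name is hypothetical.
 */
#if 0
static int read_locked_op(struct ubi_device *ubi, struct ubi_volume *vol,
			  int lnum)
{
	int err;

	err = leb_read_lock(ubi, vol->vol_id, lnum);
	if (err)
		return err;

	/* ... safely consult vol->eba_tbl[lnum] and read from the PEB ... */

	leb_read_unlock(ubi, vol->vol_id, lnum);
	return 0;
}
#endif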
/**
 * leb_write_lock - lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	down_write(&le->mutex);
	return 0;
}

/**
 * leb_write_trylock - try to lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing if there is no
 * contention and does nothing if there is contention. Returns %0 in case of
 * success, %1 in case of contention, and a negative error code in case of
 * failure.
 */
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
{
	int _free;
	struct ubi_ltree_entry *le;

	le = ltree_add_entry(ubi, vol_id, lnum);
	if (IS_ERR(le))
		return PTR_ERR(le);
	if (down_write_trylock(&le->mutex))
		return 0;

	/* Contention, cancel */
	spin_lock(&ubi->ltree_lock);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		_free = 1;
	} else
		_free = 0;
	spin_unlock(&ubi->ltree_lock);
	if (_free)
		kfree(le);

	return 1;
}

/**
 * leb_write_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */
static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
	int _free;
	struct ubi_ltree_entry *le;

	spin_lock(&ubi->ltree_lock);
	le = ltree_lookup(ubi, vol_id, lnum);
	le->users -= 1;
	ubi_assert(le->users >= 0);
	if (le->users == 0) {
		rb_erase(&le->rb, &ubi->ltree);
		_free = 1;
	} else
		_free = 0;
	spin_unlock(&ubi->ltree_lock);

	up_write(&le->mutex);
	if (_free)
		kfree(le);
}

/**
 * ubi_eba_unmap_leb - un-map logical eraseblock.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 *
 * This function un-maps logical eraseblock @lnum and schedules the
 * corresponding physical eraseblock for erasure. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
		      int lnum)
{
	int err, pnum, vol_id = vol->vol_id;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum < 0)
		/* This logical eraseblock is already unmapped */
		goto out_unlock;

	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);

	vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
	err = ubi_wl_put_peb(ubi, pnum, 0);

out_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}
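/*
 * Illustrative sketch (not compiled in): un-mapping a LEB of a dynamic volume
 * and observing that a subsequent read yields 0xFF bytes, because
 * 'ubi_eba_read_leb()' fills the buffer with 0xFF for unmapped LEBs. The
 * 'punch_leb' name is hypothetical.
 */
#if 0
static int punch_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		     void *buf, int len)
{
	int err;

	err = ubi_eba_unmap_leb(ubi, vol, lnum);
	if (err)
		return err;

	/* The LEB is unmapped now, so this read fills @buf with 0xFF bytes */
	return ubi_eba_read_leb(ubi, vol, lnum, buf, 0, len, 0);
}
#endif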
/**
 * ubi_eba_read_leb - read data.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: buffer to store the read data
 * @offset: offset from where to read
 * @len: how many bytes to read
 * @check: data CRC check flag
 *
 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
 * bytes. The @check flag only makes sense for static volumes and forces
 * eraseblock data CRC checking.
 *
 * In case of success this function returns zero. In case of a static volume,
 * if the data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
 * returned for any volume type if an ECC error was detected by the MTD device
 * driver. Other negative error codes may be returned in case of other errors.
 */
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		     void *buf, int offset, int len, int check)
{
	int err, pnum, scrub = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t uninitialized_var(crc);

	err = leb_read_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum < 0) {
		/*
		 * The logical eraseblock is not mapped, fill the whole buffer
		 * with 0xFF bytes. The exception is static volumes for which
		 * it is an error to read unmapped logical eraseblocks.
		 */
		dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
			len, offset, vol_id, lnum);
		leb_read_unlock(ubi, vol_id, lnum);
		ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
		memset(buf, 0xFF, len);
		return 0;
	}

	dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	if (vol->vol_type == UBI_DYNAMIC_VOLUME)
		check = 0;

retry:
	if (check) {
		vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
		if (!vid_hdr) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
		if (err && err != UBI_IO_BITFLIPS) {
			if (err > 0) {
				/*
				 * The header is either absent or corrupted.
				 * The former case means there is a bug -
				 * switch to read-only mode just in case.
				 * The latter case means a real corruption - we
				 * may try to recover data. FIXME: but this is
				 * not implemented.
				 */
				if (err == UBI_IO_BAD_VID_HDR) {
					ubi_warn("bad VID header at PEB %d, LEB %d:%d",
						 pnum, vol_id, lnum);
					err = -EBADMSG;
				} else
					ubi_ro_mode(ubi);
			}
			goto out_free;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
		ubi_assert(len == be32_to_cpu(vid_hdr->data_size));

		crc = be32_to_cpu(vid_hdr->data_crc);
		ubi_free_vid_hdr(ubi, vid_hdr);
	}

	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
	if (err) {
		if (err == UBI_IO_BITFLIPS) {
			scrub = 1;
			err = 0;
		} else if (mtd_is_eccerr(err)) {
			if (vol->vol_type == UBI_DYNAMIC_VOLUME)
				goto out_unlock;
			scrub = 1;
			if (!check) {
				ubi_msg("force data checking");
				check = 1;
				goto retry;
			}
		} else
			goto out_unlock;
	}

	if (check) {
		uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);

		if (crc1 != crc) {
			ubi_warn("CRC error: calculated %#08x, must be %#08x",
				 crc1, crc);
			err = -EBADMSG;
			goto out_unlock;
		}
	}

	if (scrub)
		err = ubi_wl_scrub_peb(ubi, pnum);

	leb_read_unlock(ubi, vol_id, lnum);
	return err;

out_free:
	ubi_free_vid_hdr(ubi, vid_hdr);
out_unlock:
	leb_read_unlock(ubi, vol_id, lnum);
	return err;
}
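/*
 * Illustrative sketch (not compiled in): reading @len bytes from offset 0 of
 * a LEB of a dynamic volume. @check is 0 because the data CRC check only
 * makes sense for static volumes (and for a static volume @len would have to
 * match the stored data size, see the asserts above). The 'read_leb_head'
 * name is hypothetical.
 */
#if 0
static int read_leb_head(struct ubi_device *ubi, struct ubi_volume *vol,
			 int lnum, void *buf, int len)
{
	return ubi_eba_read_leb(ubi, vol, lnum, buf, 0, len, 0);
}
#endif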
/**
 * recover_peb - recover from write failure.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to recover
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 * @buf: data which was not written because of the write failure
 * @offset: offset of the failed write
 * @len: how many bytes should have been written
 *
 * This function is called in case of a write failure and moves all good data
 * from the potentially bad physical eraseblock to a good physical eraseblock.
 * This function also writes the data which was not written due to the
 * failure. Returns zero in case of success, and a negative error code in
 * case of failure.
 */
static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
		       const void *buf, int offset, int len)
{
	int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
	struct ubi_volume *vol = ubi->volumes[idx];
	struct ubi_vid_hdr *vid_hdr;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->buf_mutex);

retry:
	new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
	if (new_pnum < 0) {
		mutex_unlock(&ubi->buf_mutex);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return new_pnum;
	}

	ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum);

	err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err > 0)
			err = -EIO;
		goto out_put;
	}

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
	if (err)
		goto write_error;

	data_size = offset + len;
	memset(ubi->peb_buf1 + offset, 0xFF, len);

	/* Read everything before the area where the write failure happened */
	if (offset > 0) {
		err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
		if (err && err != UBI_IO_BITFLIPS)
			goto out_put;
	}

	memcpy(ubi->peb_buf1 + offset, buf, len);

	err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
	if (err)
		goto write_error;

	mutex_unlock(&ubi->buf_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);

	vol->eba_tbl[lnum] = new_pnum;
	ubi_wl_put_peb(ubi, pnum, 1);

	ubi_msg("data was successfully recovered");
	return 0;

out_put:
	mutex_unlock(&ubi->buf_mutex);
	ubi_wl_put_peb(ubi, new_pnum, 1);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return err;

write_error:
	/*
	 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
	 * get another one.
	 */
	ubi_warn("failed to write to PEB %d", new_pnum);
	ubi_wl_put_peb(ubi, new_pnum, 1);
	if (++tries > UBI_IO_RETRIES) {
		mutex_unlock(&ubi->buf_mutex);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}
	ubi_msg("try again");
	goto retry;
}
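/*
 * Worked example of the recovery buffer assembly above, with hypothetical
 * numbers: if the failed write was @offset = 1024 and @len = 512, then
 * @data_size = @offset + @len = 1536. Bytes [0, 1024) of @ubi->peb_buf1 are
 * read back from the old PEB, bytes [1024, 1536) are copied from @buf, and
 * the resulting 1536 bytes are written to the new PEB at offset 0.
 */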
/**
 * ubi_eba_write_leb - write data to dynamic volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: the data to write
 * @offset: offset within the logical eraseblock where to write
 * @len: how many bytes to write
 * @dtype: data type
 *
 * This function writes data to logical eraseblock @lnum of a dynamic volume
 * @vol. Returns zero in case of success and a negative error code in case
 * of failure. In case of error, it is possible that something was still
 * written to the flash media, but it may be some garbage.
 */
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
		      const void *buf, int offset, int len, int dtype)
{
	int err, pnum, tries = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;

	if (ubi->ro_mode)
		return -EROFS;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		return err;

	pnum = vol->eba_tbl[lnum];
	if (pnum >= 0) {
		dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
			len, offset, vol_id, lnum, pnum);

		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn("failed to write data to PEB %d", pnum);
			if (err == -EIO && ubi->bad_allowed)
				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
						  offset, len);
			if (err)
				ubi_ro_mode(ubi);
		}
		leb_write_unlock(ubi, vol_id, lnum);
		return err;
	}

	/*
	 * The logical eraseblock is not mapped. We have to get a free physical
	 * eraseblock and write the volume identifier header there first.
	 */
	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr) {
		leb_write_unlock(ubi, vol_id, lnum);
		return -ENOMEM;
	}

	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

retry:
	pnum = ubi_wl_get_peb(ubi, dtype);
	if (pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		leb_write_unlock(ubi, vol_id, lnum);
		return pnum;
	}

	dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
		len, offset, vol_id, lnum, pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto write_error;
	}

	if (len) {
		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
		if (err) {
			ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
				 len, offset, vol_id, lnum, pnum);
			goto write_error;
		}
	}

	vol->eba_tbl[lnum] = pnum;

	leb_write_unlock(ubi, vol_id, lnum);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	/*
	 * Fortunately, this is the first write operation to this physical
	 * eraseblock, so just put it and request a new one. We assume that if
	 * this physical eraseblock went bad, the erase code will handle that.
	 */
	err = ubi_wl_put_peb(ubi, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	ubi_msg("try another PEB");
	goto retry;
}
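/*
 * Illustrative sketch (not compiled in): writing one minimal I/O unit of
 * data at @offset within a LEB of a dynamic volume. Both @offset and @len
 * are assumed to be aligned to @ubi->min_io_size (a general UBI I/O
 * convention, enforced by the callers of this unit). %UBI_UNKNOWN expresses
 * no data type preference, as in 'recover_peb()' above. The 'append_chunk'
 * name is hypothetical.
 */
#if 0
static int append_chunk(struct ubi_device *ubi, struct ubi_volume *vol,
			int lnum, const void *buf, int offset)
{
	int len = ubi->min_io_size;

	return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len,
				 UBI_UNKNOWN);
}
#endif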
/**
 * ubi_eba_write_leb_st - write data to static volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 * @dtype: data type
 * @used_ebs: how many logical eraseblocks will this volume contain
 *
 * This function writes data to logical eraseblock @lnum of static volume
 * @vol. The @used_ebs argument should contain the total number of logical
 * eraseblocks in this static volume.
 *
 * When writing to the last logical eraseblock, the @len argument doesn't have
 * to be aligned to the minimal I/O unit size. Instead, it has to be equivalent
 * to the real data size, although the @buf buffer has to be padded up to that
 * alignment. In all other cases, @len has to be aligned.
 *
 * It is prohibited to write more than once to logical eraseblocks of static
 * volumes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
			 int lnum, const void *buf, int len, int dtype,
			 int used_ebs)
{
	int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (lnum == used_ebs - 1)
		/* If this is the last LEB @len may be unaligned */
		len = ALIGN(data_size, ubi->min_io_size);
	else
		ubi_assert(!(len & (ubi->min_io_size - 1)));

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	err = leb_write_lock(ubi, vol_id, lnum);
	if (err) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, data_size);
	vid_hdr->vol_type = UBI_VID_STATIC;
	vid_hdr->data_size = cpu_to_be32(data_size);
	vid_hdr->used_ebs = cpu_to_be32(used_ebs);
	vid_hdr->data_crc = cpu_to_be32(crc);

retry:
	pnum = ubi_wl_get_peb(ubi, dtype);
	if (pnum < 0) {
		ubi_free_vid_hdr(ubi, vid_hdr);
		leb_write_unlock(ubi, vol_id, lnum);
		return pnum;
	}

	dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
		len, vol_id, lnum, pnum, used_ebs);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto write_error;
	}

	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
	if (err) {
		ubi_warn("failed to write %d bytes of data to PEB %d",
			 len, pnum);
		goto write_error;
	}

	ubi_assert(vol->eba_tbl[lnum] < 0);
	vol->eba_tbl[lnum] = pnum;

	leb_write_unlock(ubi, vol_id, lnum);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		/*
		 * This flash device does not admit of bad eraseblocks or
		 * something nasty and unexpected happened. Switch to read-only
		 * mode just in case.
		 */
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	err = ubi_wl_put_peb(ubi, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		leb_write_unlock(ubi, vol_id, lnum);
		ubi_free_vid_hdr(ubi, vid_hdr);
		return err;
	}

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	ubi_msg("try another PEB");
	goto retry;
}
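/*
 * Worked example of the last-LEB rule above, with hypothetical numbers: with
 * @ubi->min_io_size = 2048 and 5000 bytes of real data in the last LEB, the
 * caller passes @len = 5000. The function CRCs exactly those 5000 bytes
 * (@data_size) but pads the on-flash write up to ALIGN(5000, 2048) = 6144
 * bytes, so @buf must be at least 6144 bytes long. For any LEB other than
 * the last one, @len itself must already be a multiple of 2048.
 */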
/**
 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 * @dtype: data type
 *
 * This function changes the contents of a logical eraseblock atomically. @buf
 * has to contain new logical eraseblock data, and @len - the length of the
 * data, which has to be aligned. This function guarantees that in case of an
 * unclean reboot the old contents are preserved. Returns zero in case of
 * success and a negative error code in case of failure.
 *
 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
 */
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
			      int lnum, const void *buf, int len, int dtype)
{
	int err, pnum, tries = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (len == 0) {
		/*
		 * Special case when data length is zero. In this case the LEB
		 * has to be unmapped and mapped somewhere else.
		 */
		err = ubi_eba_unmap_leb(ubi, vol, lnum);
		if (err)
			return err;
		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
	}

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->alc_mutex);
	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		goto out_mutex;

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, len);
	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->data_size = cpu_to_be32(len);
	vid_hdr->copy_flag = 1;
	vid_hdr->data_crc = cpu_to_be32(crc);

retry:
	pnum = ubi_wl_get_peb(ubi, dtype);
	if (pnum < 0) {
		err = pnum;
		goto out_leb_unlock;
	}

	dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
		vol_id, lnum, vol->eba_tbl[lnum], pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto write_error;
	}

	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
	if (err) {
		ubi_warn("failed to write %d bytes of data to PEB %d",
			 len, pnum);
		goto write_error;
	}

	if (vol->eba_tbl[lnum] >= 0) {
		err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
		if (err)
			goto out_leb_unlock;
	}

	vol->eba_tbl[lnum] = pnum;

out_leb_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
out_mutex:
	mutex_unlock(&ubi->alc_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return err;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		/*
		 * This flash device does not admit of bad eraseblocks or
		 * something nasty and unexpected happened. Switch to read-only
		 * mode just in case.
		 */
		ubi_ro_mode(ubi);
		goto out_leb_unlock;
	}

	err = ubi_wl_put_peb(ubi, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		goto out_leb_unlock;
	}

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	ubi_msg("try another PEB");
	goto retry;
}
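/*
 * Illustrative sketch (not compiled in): atomically replacing the contents
 * of a LEB, e.g. for updating an on-flash table. The new data is written to
 * a fresh PEB and the old PEB is only put afterwards, so an unclean reboot
 * leaves either the complete old or the complete new version. The
 * 'update_table' name and its parameters are hypothetical.
 */
#if 0
static int update_table(struct ubi_device *ubi, struct ubi_volume *vol,
			int lnum, const void *tbl, int tbl_len)
{
	/* @tbl_len has to be aligned, as required by the docstring above */
	return ubi_eba_atomic_leb_change(ubi, vol, lnum, tbl, tbl_len,
					 UBI_UNKNOWN);
}
#endif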
/**
 * ubi_eba_copy_leb - copy logical eraseblock.
 * @ubi: UBI device description object
 * @from: physical eraseblock number from where to copy
 * @to: physical eraseblock number where to copy
 * @vid_hdr: VID header of the @from physical eraseblock
 *
 * This function copies logical eraseblock from physical eraseblock @from to
 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
 * function. Returns:
 *   o %0 in case of success;
 *   o %1 if the operation was canceled and should be tried later (e.g.,
 *     because a bit-flip was detected at the target PEB);
 *   o %2 if the volume is being deleted and this LEB should not be moved.
 */
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
		     struct ubi_vid_hdr *vid_hdr)
{
	int err, vol_id, lnum, data_size, aldata_size, idx;
	struct ubi_volume *vol;
	uint32_t crc;

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);

	if (vid_hdr->vol_type == UBI_VID_STATIC) {
		data_size = be32_to_cpu(vid_hdr->data_size);
		aldata_size = ALIGN(data_size, ubi->min_io_size);
	} else
		data_size = aldata_size =
			ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);

	idx = vol_id2idx(ubi, vol_id);
	spin_lock(&ubi->volumes_lock);
	/*
	 * Note, we may race with volume deletion, which means that the volume
	 * this logical eraseblock belongs to might be being deleted. Since the
	 * volume deletion un-maps all the volume's logical eraseblocks, it
	 * will be locked in 'ubi_wl_put_peb()' and wait for the WL worker to
	 * finish.
	 */
	vol = ubi->volumes[idx];
	if (!vol) {
		/* No need to do further work, cancel */
		dbg_eba("volume %d is being removed, cancel", vol_id);
		spin_unlock(&ubi->volumes_lock);
		return 2;
	}
	spin_unlock(&ubi->volumes_lock);

	/*
	 * We do not want anybody to write to this logical eraseblock while we
	 * are moving it, so lock it.
	 *
	 * Note, we are using non-waiting locking here, because we cannot sleep
	 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
	 * unmapping the LEB which is mapped to the PEB we are going to move
	 * (@from). This task locks the LEB and goes to sleep in the
	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
	 * holding @ubi->move_mutex and would go to sleep on the LEB lock. So,
	 * if the LEB is already locked, we just do not move it and return %1.
	 */
	err = leb_write_trylock(ubi, vol_id, lnum);
	if (err) {
		dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
		return err;
	}

	/*
	 * The LEB might have been put meanwhile, and the task which put it is
	 * probably waiting on @ubi->move_mutex. No need to continue the work,
	 * cancel it.
	 */
	if (vol->eba_tbl[lnum] != from) {
		dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
			vol_id, lnum, from, vol->eba_tbl[lnum]);
		err = 1;
		goto out_unlock_leb;
	}

	/*
	 * OK, now the LEB is locked and we can safely start moving it. Since
	 * this function utilizes the @ubi->peb_buf1 buffer, which is shared
	 * with some other functions, lock the buffer by taking the
	 * @ubi->buf_mutex.
	 */
	mutex_lock(&ubi->buf_mutex);
	dbg_eba("read %d bytes of data", aldata_size);
	err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
	if (err && err != UBI_IO_BITFLIPS) {
		ubi_warn("error %d while reading data from PEB %d",
			 err, from);
		goto out_unlock_buf;
	}

	/*
	 * Now we have got to calculate how much data we have to copy. In
	 * case of a static volume it is fairly easy - the VID header contains
	 * the data size. In case of a dynamic volume it is more difficult - we
	 * have to read the contents, cut 0xFF bytes from the end and copy only
	 * the first part. We must do this to avoid writing 0xFF bytes as it
	 * may have some side-effects. And not only this. It is important not
	 * to include those 0xFFs in the CRC because later they may be filled
	 * by data.
	 */
	if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
		aldata_size = data_size =
			ubi_calc_data_len(ubi, ubi->peb_buf1, data_size);

	cond_resched();
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf1, data_size);
	cond_resched();

	/*
	 * It may turn out to be that the whole @from physical eraseblock
	 * contains only 0xFF bytes. Then we have to only write the VID header
	 * and do not write any data. This also means we should not set
	 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
	 */
	if (data_size > 0) {
		vid_hdr->copy_flag = 1;
		vid_hdr->data_size = cpu_to_be32(data_size);
		vid_hdr->data_crc = cpu_to_be32(crc);
	}
	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));

	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
	if (err)
		goto out_unlock_buf;

	cond_resched();

	/* Read the VID header back and check if it was written correctly */
	err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
	if (err) {
		if (err != UBI_IO_BITFLIPS)
			ubi_warn("cannot read VID header back from PEB %d", to);
		else
			err = 1;
		goto out_unlock_buf;
	}

	if (data_size > 0) {
		err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
		if (err)
			goto out_unlock_buf;

		cond_resched();

		/*
		 * We've written the data and are going to read it back to make
		 * sure it was written correctly.
		 */
		err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
		if (err) {
			if (err != UBI_IO_BITFLIPS)
				ubi_warn("cannot read data back from PEB %d",
					 to);
			else
				err = 1;
			goto out_unlock_buf;
		}

		cond_resched();

		if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
			ubi_warn("read data back from PEB %d - it is different",
				 to);
			/* Do not report success if the copies differ */
			err = -EINVAL;
			goto out_unlock_buf;
		}
	}

	ubi_assert(vol->eba_tbl[lnum] == from);
	vol->eba_tbl[lnum] = to;

out_unlock_buf:
	mutex_unlock(&ubi->buf_mutex);
out_unlock_leb:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}
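/*
 * Illustrative sketch (not compiled in): how a wear-levelling style caller
 * might interpret the 'ubi_eba_copy_leb()' return codes. The surrounding
 * logic and the 'handle_move' name are hypothetical and heavily simplified.
 */
#if 0
static int handle_move(struct ubi_device *ubi, int from, int to,
		       struct ubi_vid_hdr *vid_hdr)
{
	int err = ubi_eba_copy_leb(ubi, from, to, vid_hdr);

	switch (err) {
	case 0:
		/* Moved: @from may now be scheduled for erasure */
		return 0;
	case 1:
		/* Cancelled (contention or bit-flips): retry later */
		return 0;
	case 2:
		/* Volume is being deleted: do not move this LEB at all */
		return 0;
	default:
		/* Negative error code: a real failure */
		return err;
	}
}
#endif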
/**
 * ubi_eba_init_scan - initialize the EBA unit using scanning information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int i, j, err, num_volumes;
	struct ubi_scan_volume *sv;
	struct ubi_volume *vol;
	struct ubi_scan_leb *seb;
	struct rb_node *rb;

	dbg_eba("initialize EBA unit");

	spin_lock_init(&ubi->ltree_lock);
	mutex_init(&ubi->alc_mutex);
	ubi->ltree = RB_ROOT;

	ubi->global_sqnum = si->max_sqnum + 1;
	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	for (i = 0; i < num_volumes; i++) {
		vol = ubi->volumes[i];
		if (!vol)
			continue;

		cond_resched();

		vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
				       GFP_KERNEL);
		if (!vol->eba_tbl) {
			err = -ENOMEM;
			goto out_free;
		}

		for (j = 0; j < vol->reserved_pebs; j++)
			vol->eba_tbl[j] = UBI_LEB_UNMAPPED;

		sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
		if (!sv)
			continue;

		ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
			if (seb->lnum >= vol->reserved_pebs)
				/*
				 * This may happen in case of an unclean reboot
				 * during re-size.
				 */
				ubi_scan_move_to_list(sv, seb, &si->erase);
			else
				vol->eba_tbl[seb->lnum] = seb->pnum;
		}
	}

	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, EBA_RESERVED_PEBS);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= EBA_RESERVED_PEBS;
	ubi->rsvd_pebs += EBA_RESERVED_PEBS;

	if (ubi->bad_allowed) {
		ubi_calculate_reserved(ubi);

		if (ubi->avail_pebs < ubi->beb_rsvd_level) {
			/* Not enough free physical eraseblocks */
			ubi->beb_rsvd_pebs = ubi->avail_pebs;
			ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
				 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
		} else
			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;

		ubi->avail_pebs -= ubi->beb_rsvd_pebs;
		ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
	}

	dbg_eba("EBA unit is initialized");
	return 0;

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;
		kfree(ubi->volumes[i]->eba_tbl);
	}
	return err;
}

/**
 * ubi_eba_close - close EBA unit.
 * @ubi: UBI device description object
 */
void ubi_eba_close(const struct ubi_device *ubi)
{
	int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	dbg_eba("close EBA unit");

	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;
		kfree(ubi->volumes[i]->eba_tbl);
	}
}
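/*
 * Illustrative sketch (not compiled in): the EBA unit life-cycle as seen from
 * attach/detach code. Only the two entry points come from this file; the
 * 'attach_sketch' name is hypothetical.
 */
#if 0
static int attach_sketch(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int err;

	/* Build the per-volume EBA tables from the scanning information */
	err = ubi_eba_init_scan(ubi, si);
	if (err)
		return err;

	/* ... use the device; on detach, free the EBA tables again ... */
	ubi_eba_close(ubi);
	return 0;
}
#endif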