// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 */

#ifndef __UBOOT__
#include <linux/crc32.h>
#else
#include <div64.h>
#include <malloc.h>
#include <ubi_uboot.h>
#endif

#include <linux/compat.h>
#include <linux/math64.h>
#include "ubi.h"

/**
 * init_seen - allocate memory for the seen logic, used for debugging.
 * @ubi: UBI device description object
 */
static inline int *init_seen(struct ubi_device *ubi)
{
	int *ret;

	if (!ubi_dbg_chk_fastmap(ubi))
		return NULL;

	ret = kcalloc(ubi->peb_count, sizeof(int), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	return ret;
}

/**
 * free_seen - free the seen logic integer array.
 * @seen: integer array of @ubi->peb_count size
 */
static inline void free_seen(int *seen)
{
	kfree(seen);
}

/**
 * set_seen - mark a PEB as seen.
 * @ubi: UBI device description object
 * @pnum: The PEB to be marked as seen
 * @seen: integer array of @ubi->peb_count size
 */
static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
{
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return;

	seen[pnum] = 1;
}

/**
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: integer array of @ubi->peb_count size
 */
static int self_check_seen(struct ubi_device *ubi, int *seen)
{
	int pnum, ret = 0;

	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return 0;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		if (!seen[pnum] && ubi->lookuptbl[pnum]) {
			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
	       sizeof(struct ubi_fm_hdr) +
	       sizeof(struct ubi_fm_scan_pool) +
	       sizeof(struct ubi_fm_scan_pool) +
	       (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
	       (sizeof(struct ubi_fm_eba) +
	       (ubi->peb_count * sizeof(__be32))) +
	       sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}

/**
 * new_fm_vhdr - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_hdr *new;

	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	new->vol_type = UBI_VID_DYNAMIC;
	new->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	new->compat = UBI_COMPAT_DELETE;

out:
	return new;
}

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new LEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
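 *
 * A minimal usage sketch, mirroring the calls in ubi_attach_fastmap()
 * below, where the free/used/scrub/erase EC lists are populated:
 *
 *	ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
 *		      be32_to_cpu(fmec->ec), 0);
 *	if (ret)
 *		goto fail;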
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->pnum = pnum;
	aeb->ec = ec;
	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates an error.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else if (vol_id < av->vol_id)
			p = &(*p)->rb_right;
		else
			return ERR_PTR(-EINVAL);
	}

	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
	if (!av)
		goto out;

	av->highest_lnum = av->leb_count = av->used_ebs = 0;
	av->vol_id = vol_id;
	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	av->root = RB_ROOT;
	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = used_ebs;

	dbg_bld("found volume (ID %i)", vol_id);

	rb_link_node(&av->rb, parent, p);
	rb_insert_color(&av->rb, &ai->volumes);

out:
	return av;
}

/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the to be assigned SEB
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	p = &av->root.rb_node;
	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}

/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
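 *
 * Note (from the code below): on all non-error paths @new_aeb is consumed.
 * It is either linked into @av's RB tree, moved to @ai->erase, or freed
 * again when the PEB is already known.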
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = kmem_cache_alloc(ai->aeb_slab_cache,
						  GFP_KERNEL);
			if (!victim)
				return -ENOMEM;

			victim->ec = aeb->ec;
			victim->pnum = aeb->pnum;
			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}

/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
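 *
 * PEBs belonging to the fastmap itself (UBI_FM_SB_VOLUME_ID and
 * UBI_FM_DATA_VOLUME_ID) are not tracked in any volume EBA, so they
 * are simply dropped here.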
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	struct ubi_ainf_volume *av, *tmp_av = NULL;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
	int found = 0;

	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
	    be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	while (*p) {
		parent = *p;
		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
			p = &(*p)->rb_left;
		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
			p = &(*p)->rb_right;
		else {
			found = 1;
			break;
		}
	}

	if (found)
		av = tmp_av;
	else {
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
		av = rb_entry(node, struct ubi_ainf_volume, rb);

		for (node2 = rb_first(&av->root); node2;
		     node2 = rb_next(node2)) {
			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				av->leb_count--;
				kmem_cache_free(ai->aeb_slab_cache, aeb);
				return;
			}
		}
	}
}

/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the to be scanned pool
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success; if the pool is unusable, UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
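 *
 * Rough decision per pool PEB, as implemented below:
 *	EC header unreadable or wrong image_seq	-> UBI_BAD_FASTMAP
 *	VID header empty (0xFF)			-> PEB goes to @free
 *	VID header valid			-> process_pool_aeb()
 *	anything else				-> UBI_BAD_FASTMAP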
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
{
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		kfree(ech);
		return -ENOMEM;
	}

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);

			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);
			if (err == UBI_IO_FF_BITFLIPS)
				err = add_aeb(ai, free, pnum, ec, 1);
			else
				err = add_aeb(ai, free, pnum, ec, 0);
			if (err) {
				ret = err;
				goto out;
			}
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
						   GFP_KERNEL);
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->ec = be64_to_cpu(ech->ec);
			new_aeb->pnum = pnum;
			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}
	}

out:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
	return ret;
}

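/*
 * Accounting helper for the self check at the end of ubi_attach_fastmap():
 * every PEB the fastmap describes must end up in exactly one place, either
 * ai->free, ai->erase or one of the per-volume RB trees. The sum computed
 * here must equal peb_count - bad_peb_count - fm->used_blocks.
 */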
/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}

/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl->size);
	wl_pool_size = be16_to_cpu(fmpl_wl->size);
	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
		goto fail_bad;
	}

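	/*
	 * On-flash fastmap layout, as walked by the code below (this mirrors
	 * the size calculation in ubi_calc_fm_size()):
	 *
	 *	ubi_fm_sb | ubi_fm_hdr | pool | WL pool |
	 *	free/used/scrub/erase EC entries |
	 *	per volume: ubi_fm_volhdr + ubi_fm_eba (EBA table)
	 */
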
	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (!av)
			goto fail_bad;
		if (PTR_ERR(av) == -EINVAL) {
			ubi_err(ubi, "volume (ID %i) already exists",
				be32_to_cpu(fmvhdr->vol_id));
			goto fail_bad;
		}

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if (pnum < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			if (!aeb) {
				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
				goto fail_bad;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}
	}

	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

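	/*
	 * The WL pool must be scanned as well: wear-leveling may have
	 * taken PEBs from it and written new data after the fastmap
	 * was created.
	 */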
	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
#ifndef __UBOOT__
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;
#else
	if (count_fastmap_pebs(ai) != ubi->peb_count -
	    ai->bad_peb_count - fm->used_blocks) {
		WARN_ON(1);
		goto fail_bad;
	}
#endif

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}

	return ret;
}

/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @fm_anchor: The fastmap starts at this PEB
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int fm_anchor)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	int i, used_blocks, pnum, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	down_write(&ubi->fm_protect);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
			used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

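	/*
	 * Now read and check each PEB the fastmap superblock points to:
	 * the EC header must match the image sequence number, the VID
	 * header must carry the expected fastmap volume ID, and the
	 * payload is copied into ubi->fm_buf for the CRC check below.
	 */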
	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
			tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;

	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
out:
	up_write(&ubi->fm_protect);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}

/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the to be written fastmap
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	struct rb_node *tmp_rb;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;
	int *seen_pebs = NULL;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avhdr) {
		ret = -ENOMEM;
		goto out;
	}

	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvhdr) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	seen_pebs = init_seen(ubi);
	if (IS_ERR(seen_pebs)) {
		ret = PTR_ERR(seen_pebs);
		goto out_kfree;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++) {
		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
	}

	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);
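
	/*
	 * Both the used PEBs and the PEBs in the protection queue are
	 * recorded as used below: a protected PEB still carries data and
	 * is only temporarily shielded from wear-leveling.
	 */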
	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}

	ubi_for_each_protected_peb(ubi, i, wl_e) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			set_seen(ubi, wl_e->pnum, seen_pebs);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++)
			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_kfree;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

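	/*
	 * The CRC is computed over the whole fastmap with data_crc zeroed;
	 * ubi_scan_fastmap() clears the field the same way before verifying.
	 */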
	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
				   new_fm->e[i]->pnum, ubi->leb_start,
				   ubi->leb_size);
		if (ret) {
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	ret = self_check_seen(ubi, seen_pebs);
	dbg_bld("fastmap written!");

out_kfree:
	ubi_free_vid_hdr(ubi, avhdr);
	ubi_free_vid_hdr(ubi, dvhdr);
	free_seen(seen_pebs);
out:
	return ret;
}

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 *
 * This function ensures that upon next UBI attach a full scan
 * is issued. We need this if UBI is about to write a new fastmap
 * but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue or b) fall back to RO mode to have the
 * current fastmap in a valid state.
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fastmap_layout *fm;
	struct ubi_wl_entry *e;
	struct ubi_vid_hdr *vh = NULL;

	if (!ubi->fm)
		return 0;

	ubi->fm = NULL;

	ret = -ENOMEM;
	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm)
		goto out;

	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vh)
		goto out_free_fm;

	ret = -ENOSPC;
	e = ubi_wl_get_fm_peb(ubi, 1);
	if (!e)
		goto out_free_fm;

	/*
	 * Create fake fastmap such that UBI will fall back
	 * to scanning mode.
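	 * Only a fresh VID header is written to the anchor PEB; without a
	 * valid superblock payload behind it, the next attach fails the
	 * fastmap magic/CRC checks and does a full scan.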
	 */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
	if (ret < 0) {
		ubi_wl_put_fm_peb(ubi, e, 0, 0);
		goto out_free_fm;
	}

	fm->used_blocks = 1;
	fm->e[0] = e;

	ubi->fm = fm;

out:
	ubi_free_vid_hdr(ubi, vh);
	return ret;

out_free_fm:
	kfree(fm);
	goto out;
}

/**
 * return_fm_pebs - returns all PEBs used by a fastmap back to the
 * WL sub-system.
 * @ubi: UBI device object
 * @fm: fastmap layout object
 */
static void return_fm_pebs(struct ubi_device *ubi,
			   struct ubi_fastmap_layout *fm)
{
	int i;

	if (!fm)
		return;

	for (i = 0; i < fm->used_blocks; i++) {
		if (fm->e[i]) {
			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
					  fm->to_be_tortured[i]);
			fm->e[i] = NULL;
		}
	}
}

/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i, j;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	down_write(&ubi->fm_protect);

	ubi_refill_pools(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		up_write(&ubi->fm_protect);
		return 0;
	}

	ret = ubi_ensure_anchor_pebs(ubi);
	if (ret) {
		up_write(&ubi->fm_protect);
		return ret;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		up_write(&ubi->fm_protect);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e) {
			if (old_fm && old_fm->e[i]) {
				ret = erase_block(ubi, old_fm->e[i]->pnum);
				if (ret < 0) {
					ubi_err(ubi, "could not erase old fastmap PEB");

					for (j = 1; j < i; j++) {
						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
								  j, 0);
						new_fm->e[j] = NULL;
					}
					goto err;
				}
				new_fm->e[i] = old_fm->e[i];
				old_fm->e[i] = NULL;
			} else {
				ubi_err(ubi, "could not get any free erase block");

				for (j = 1; j < i; j++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
					new_fm->e[j] = NULL;
				}

				ret = -ENOSPC;
				goto err;
			}
		} else {
			new_fm->e[i] = tmp_e;

			if (old_fm && old_fm->e[i]) {
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
				old_fm->e[i] = NULL;
			}
		}
	}

	/* Old fastmap is larger than the new one */
	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
					  old_fm->to_be_tortured[i]);
			old_fm->e[i] = NULL;
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
ubi_err(ubi, "could not erase old anchor PEB"); 1591 1592 for (i = 1; i < new_fm->used_blocks; i++) { 1593 ubi_wl_put_fm_peb(ubi, new_fm->e[i], 1594 i, 0); 1595 new_fm->e[i] = NULL; 1596 } 1597 goto err; 1598 } 1599 new_fm->e[0] = old_fm->e[0]; 1600 new_fm->e[0]->ec = ret; 1601 old_fm->e[0] = NULL; 1602 } else { 1603 /* we've got a new anchor PEB, return the old one */ 1604 ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0, 1605 old_fm->to_be_tortured[0]); 1606 new_fm->e[0] = tmp_e; 1607 old_fm->e[0] = NULL; 1608 } 1609 } else { 1610 if (!tmp_e) { 1611 ubi_err(ubi, "could not find any anchor PEB"); 1612 1613 for (i = 1; i < new_fm->used_blocks; i++) { 1614 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0); 1615 new_fm->e[i] = NULL; 1616 } 1617 1618 ret = -ENOSPC; 1619 goto err; 1620 } 1621 new_fm->e[0] = tmp_e; 1622 } 1623 1624 down_write(&ubi->work_sem); 1625 down_write(&ubi->fm_eba_sem); 1626 ret = ubi_write_fastmap(ubi, new_fm); 1627 up_write(&ubi->fm_eba_sem); 1628 up_write(&ubi->work_sem); 1629 1630 if (ret) 1631 goto err; 1632 1633 out_unlock: 1634 up_write(&ubi->fm_protect); 1635 kfree(old_fm); 1636 return ret; 1637 1638 err: 1639 ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret); 1640 1641 ret = invalidate_fastmap(ubi); 1642 if (ret < 0) { 1643 ubi_err(ubi, "Unable to invalidiate current fastmap!"); 1644 ubi_ro_mode(ubi); 1645 } else { 1646 return_fm_pebs(ubi, old_fm); 1647 return_fm_pebs(ubi, new_fm); 1648 ret = 0; 1649 } 1650 1651 kfree(new_fm); 1652 goto out_unlock; 1653 } 1654