/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 */

#include <linux/crc32.h>
#include <linux/bitmap.h>
#include "ubi.h"

/**
 * init_seen - allocate memory for the seen logic, used for debugging.
 * @ubi: UBI device description object
 */
static inline unsigned long *init_seen(struct ubi_device *ubi)
{
	unsigned long *ret;

	if (!ubi_dbg_chk_fastmap(ubi))
		return NULL;

	ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
		      GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	return ret;
}

/**
 * free_seen - free the seen logic bitmap.
 * @seen: bitmap of @ubi->peb_count bits
 */
static inline void free_seen(unsigned long *seen)
{
	kfree(seen);
}

/**
 * set_seen - mark a PEB as seen.
 * @ubi: UBI device description object
 * @pnum: The PEB to be marked as seen
 * @seen: bitmap of @ubi->peb_count bits
 */
static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
{
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return;

	set_bit(pnum, seen);
}

/**
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: bitmap of @ubi->peb_count bits
 */
static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
{
	int pnum, ret = 0;

	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return 0;

	/* A PEB known to the WL sub-system but not seen is an error. */
	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
	       sizeof(struct ubi_fm_hdr) +
	       sizeof(struct ubi_fm_scan_pool) +
	       sizeof(struct ubi_fm_scan_pool) +
	       (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
	       (sizeof(struct ubi_fm_eba) +
	       (ubi->peb_count * sizeof(__be32))) +
	       sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}
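/*
 * Illustrative sizing (assumed example values, not from any real device):
 * with 4096 PEBs the sum above is dominated by the per-PEB ubi_fm_ec
 * entries and the per-PEB __be32 EBA slots; roundup() to a LEB multiple
 * then typically yields one or two LEBs. The real result depends entirely
 * on the flash geometry.
 */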
/**
 * new_fm_vhdr - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_hdr *new;

	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	new->vol_type = UBI_VID_DYNAMIC;
	new->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	new->compat = UBI_COMPAT_DELETE;

out:
	return new;
}

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new PEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->pnum = pnum;
	aeb->ec = ec;
	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates out of memory, ERR_PTR(-EINVAL) a duplicate volume ID.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else if (vol_id < av->vol_id)
			p = &(*p)->rb_right;
		else
			return ERR_PTR(-EINVAL);
	}

	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
	if (!av)
		goto out;

	av->highest_lnum = av->leb_count = av->used_ebs = 0;
	av->vol_id = vol_id;
	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	av->root = RB_ROOT;
	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = used_ebs;

	dbg_bld("found volume (ID %i)", vol_id);

	rb_link_node(&av->rb, parent, p);
	rb_insert_color(&av->rb, &ai->volumes);

out:
	return av;
}

/**
 * assign_aeb_to_av - assigns an aeb to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the to be assigned aeb
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &av->root.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}
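/*
 * Note on ordering: the volume RB-tree built in add_vol() above sends
 * larger vol_ids to the left child, the reverse of the usual idiom. This
 * is harmless as long as every walker of @ai->volumes uses the same
 * comparison, which process_pool_aeb() below does.
 */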
/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

			return 0;
		}

		/*
		 * ubi_compare_lebs() sets bit 0 of its return value if the
		 * PEB passed via @pnum (here: new_aeb's) holds the newer
		 * copy of the LEB.
		 */
		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = kmem_cache_alloc(ai->aeb_slab_cache,
						  GFP_KERNEL);
			if (!victim)
				return -ENOMEM;

			victim->ec = aeb->ec;
			victim->pnum = aeb->pnum;
			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}
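/*
 * Pool references to the two internal fastmap volumes
 * (UBI_FM_SB_VOLUME_ID and UBI_FM_DATA_VOLUME_ID) are silently dropped by
 * process_pool_aeb() below; the fastmap's own PEBs are accounted for
 * separately by the attach code.
 */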
/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	struct ubi_ainf_volume *av, *tmp_av = NULL;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
	int found = 0;

	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
	    be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	while (*p) {
		parent = *p;
		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
			p = &(*p)->rb_left;
		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
			p = &(*p)->rb_right;
		else {
			found = 1;
			break;
		}
	}

	if (found)
		av = tmp_av;
	else {
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
		av = rb_entry(node, struct ubi_ainf_volume, rb);

		for (node2 = rb_first(&av->root); node2;
		     node2 = rb_next(node2)) {
			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				av->leb_count--;
				kmem_cache_free(ai->aeb_slab_cache, aeb);
				return;
			}
		}
	}
}
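/*
 * unmap_peb() is a linear walk over every LEB of every volume. This is
 * acceptable here because it only runs for pool PEBs that read back as
 * empty, and pool sizes are bounded by UBI_FM_MAX_POOL_SIZE.
 */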
/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the to be scanned pool
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the pool is unusable.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
{
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		kfree(ech);
		return -ENOMEM;
	}

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);

			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);

			/* Do not ignore an allocation failure in add_aeb() */
			if (err == UBI_IO_FF_BITFLIPS)
				err = add_aeb(ai, free, pnum, ec, 1);
			else
				err = add_aeb(ai, free, pnum, ec, 0);
			if (err) {
				ret = err;
				goto out;
			}
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
						   GFP_KERNEL);
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->ec = be64_to_cpu(ech->ec);
			new_aeb->pnum = pnum;
			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}
	}

out:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
	return ret;
}

/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}
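/*
 * count_fastmap_pebs() serves as a cheap consistency check: after a
 * successful fastmap attach, erase + free + per-volume PEBs must add up
 * to peb_count minus bad PEBs minus the fastmap's own blocks, see the
 * WARN_ON() in ubi_attach_fastmap() below.
 */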
/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl->size);
	wl_pool_size = be16_to_cpu(fmpl_wl->size);
	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
		goto fail_bad;
	}
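	/*
	 * The remaining fastmap payload starts with a sequence of ubi_fm_ec
	 * entries (pnum/ec pairs), laid out back-to-back in four groups:
	 * free, used, scrub and erase, each preceded only by its count in
	 * the fastmap header.
	 */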
	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (!av)
			goto fail_bad;
		if (PTR_ERR(av) == -EINVAL) {
			ubi_err(ubi, "volume (ID %i) already exists",
				be32_to_cpu(fmvhdr->vol_id));
			goto fail_bad;
		}

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if (pnum < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			if (!aeb) {
				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
				goto fail_bad;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}
	}
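	/*
	 * Both pools are scanned last: a pool PEB may carry a newer copy of
	 * a LEB than the EBA tables read above, and update_vol() resolves
	 * such conflicts via the sequence numbers.
	 */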
	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}

	return ret;
}

/**
 * find_fm_anchor - find the most recent Fastmap superblock (anchor).
 * @ai: UBI attach info to be filled
 */
static int find_fm_anchor(struct ubi_attach_info *ai)
{
	int ret = -1;
	struct ubi_ainf_peb *aeb;
	unsigned long long max_sqnum = 0;

	list_for_each_entry(aeb, &ai->fastmap, u.list) {
		if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
			max_sqnum = aeb->sqnum;
			ret = aeb->pnum;
		}
	}

	return ret;
}

/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @scan_ai: UBI attach info from the first 64 PEBs,
 *           used to find the most recent Fastmap data structure
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     struct ubi_attach_info *scan_ai)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	struct ubi_ainf_peb *tmp_aeb, *aeb;
	int i, used_blocks, pnum, fm_anchor, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	fm_anchor = find_fm_anchor(scan_ai);
	if (fm_anchor < 0)
		return UBI_NO_FASTMAP;

	/* Move all (possible) fastmap blocks into our new attach structure. */
	list_for_each_entry_safe(aeb, tmp_aeb, &scan_ai->fastmap, u.list)
		list_move_tail(&aeb->u.list, &ai->fastmap);
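	/*
	 * ubi->fm_protect serializes all fastmap access; it is taken for
	 * writing here and only released in the out path at the end of
	 * this function.
	 */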
	down_write(&ubi->fm_protect);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
			used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		if (i == 0 && pnum != fm_anchor) {
			ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
				pnum, fm_anchor);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;
		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
			tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kfree(fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;
	ubi->fast_attach = 1;

	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
out:
	up_write(&ubi->fm_protect);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}
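/*
 * The write side below is the mirror image of ubi_scan_fastmap(): the
 * whole fastmap is serialized into ubi->fm_buf under wl_lock/volumes_lock
 * and only then written out, one LEB per fastmap PEB.
 */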
/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the to be written fastmap
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	struct rb_node *tmp_rb;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;
	unsigned long *seen_pebs = NULL;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avhdr) {
		ret = -ENOMEM;
		goto out;
	}

	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvhdr) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	seen_pebs = init_seen(ubi);
	if (IS_ERR(seen_pebs)) {
		ret = PTR_ERR(seen_pebs);
		goto out_kfree;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++) {
		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
	}

	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
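	/*
	 * PEBs currently under erase-counter protection still hold valid
	 * data; protection is a WL-internal state, so they are recorded in
	 * the used list as well.
	 */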
	ubi_for_each_protected_peb(ubi, i, wl_e) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			set_seen(ubi, wl_e->pnum, seen_pebs);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++)
			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_kfree;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}
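	/*
	 * Only now is the payload written. It is striped LEB-wise across
	 * the fastmap PEBs, mirroring the read side in ubi_scan_fastmap().
	 */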
	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
				   new_fm->e[i]->pnum, ubi->leb_start,
				   ubi->leb_size);
		if (ret) {
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	ret = self_check_seen(ubi, seen_pebs);
	dbg_bld("fastmap written!");

out_kfree:
	ubi_free_vid_hdr(ubi, avhdr);
	ubi_free_vid_hdr(ubi, dvhdr);
	free_seen(seen_pebs);
out:
	return ret;
}

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 *
 * This function ensures that upon next UBI attach a full scan
 * is issued. We need this if UBI is about to write a new fastmap
 * but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue or b) fall back to RO mode to have the
 * current fastmap in a valid state.
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fastmap_layout *fm;
	struct ubi_wl_entry *e;
	struct ubi_vid_hdr *vh = NULL;

	if (!ubi->fm)
		return 0;

	ubi->fm = NULL;

	ret = -ENOMEM;
	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm)
		goto out;

	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vh)
		goto out_free_fm;

	ret = -ENOSPC;
	e = ubi_wl_get_fm_peb(ubi, 1);
	if (!e)
		goto out_free_fm;

	/*
	 * Create fake fastmap such that UBI will fall back
	 * to scanning mode.
	 */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
	if (ret < 0) {
		ubi_wl_put_fm_peb(ubi, e, 0, 0);
		goto out_free_fm;
	}

	fm->used_blocks = 1;
	fm->e[0] = e;

	ubi->fm = fm;

out:
	ubi_free_vid_hdr(ubi, vh);
	return ret;

out_free_fm:
	kfree(fm);
	goto out;
}
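/*
 * The "fake fastmap" above consists of a fresh anchor PEB carrying only a
 * UBI_FM_SB_VOLUME_ID VID header with a newer sqnum and no valid payload;
 * the next attach picks it as anchor, fails the payload checks and falls
 * back to a full scan.
 */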
/**
 * return_fm_pebs - returns all PEBs used by a fastmap back to the
 * WL sub-system.
 * @ubi: UBI device object
 * @fm: fastmap layout object
 */
static void return_fm_pebs(struct ubi_device *ubi,
			   struct ubi_fastmap_layout *fm)
{
	int i;

	if (!fm)
		return;

	for (i = 0; i < fm->used_blocks; i++) {
		if (fm->e[i]) {
			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
					  fm->to_be_tortured[i]);
			fm->e[i] = NULL;
		}
	}
}

/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i, j;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	down_write(&ubi->fm_protect);

	ubi_refill_pools(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		up_write(&ubi->fm_protect);
		return 0;
	}

	ret = ubi_ensure_anchor_pebs(ubi);
	if (ret) {
		up_write(&ubi->fm_protect);
		return ret;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		up_write(&ubi->fm_protect);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e) {
			if (old_fm && old_fm->e[i]) {
				ret = erase_block(ubi, old_fm->e[i]->pnum);
				if (ret < 0) {
					ubi_err(ubi, "could not erase old fastmap PEB");

					for (j = 1; j < i; j++) {
						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
								  j, 0);
						new_fm->e[j] = NULL;
					}
					goto err;
				}
				new_fm->e[i] = old_fm->e[i];
				old_fm->e[i] = NULL;
			} else {
				ubi_err(ubi, "could not get any free erase block");

				for (j = 1; j < i; j++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
					new_fm->e[j] = NULL;
				}

				ret = -ENOSPC;
				goto err;
			}
		} else {
			new_fm->e[i] = tmp_e;

			if (old_fm && old_fm->e[i]) {
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
				old_fm->e[i] = NULL;
			}
		}
	}

	/* Old fastmap is larger than the new one */
	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
					  old_fm->to_be_tortured[i]);
			old_fm->e[i] = NULL;
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
				ubi_err(ubi, "could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
					new_fm->e[i] = NULL;
				}
				goto err;
			}
			new_fm->e[0] = old_fm->e[0];
			new_fm->e[0]->ec = ret;
			old_fm->e[0] = NULL;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);
			new_fm->e[0] = tmp_e;
			old_fm->e[0] = NULL;
		}
	} else {
		if (!tmp_e) {
			ubi_err(ubi, "could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++) {
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
				new_fm->e[i] = NULL;
			}

			ret = -ENOSPC;
			goto err;
		}
		new_fm->e[0] = tmp_e;
	}
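	/*
	 * Taking work_sem for writing waits for all running works and keeps
	 * new ones from being scheduled, so the state serialized by
	 * ubi_write_fastmap() cannot change underneath it; fm_eba_sem
	 * additionally blocks EBA table updates for the duration.
	 */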
	down_write(&ubi->work_sem);
	down_write(&ubi->fm_eba_sem);
	ret = ubi_write_fastmap(ubi, new_fm);
	up_write(&ubi->fm_eba_sem);
	up_write(&ubi->work_sem);

	if (ret)
		goto err;

out_unlock:
	up_write(&ubi->fm_protect);
	kfree(old_fm);
	return ret;

err:
	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);

	ret = invalidate_fastmap(ubi);
	if (ret < 0) {
		ubi_err(ubi, "Unable to invalidate current fastmap!");
		ubi_ro_mode(ubi);
	} else {
		return_fm_pebs(ubi, old_fm);
		return_fm_pebs(ubi, new_fm);
		ret = 0;
	}

	kfree(new_fm);
	goto out_unlock;
}