/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 */

#include <linux/crc32.h>
#include "ubi.h"

/**
 * init_seen - allocate memory used for debugging.
 * @ubi: UBI device description object
 */
static inline int *init_seen(struct ubi_device *ubi)
{
	int *ret;

	if (!ubi_dbg_chk_fastmap(ubi))
		return NULL;

	ret = kcalloc(ubi->peb_count, sizeof(int), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	return ret;
}

/**
 * free_seen - free the seen logic integer array.
 * @seen: integer array of @ubi->peb_count size
 */
static inline void free_seen(int *seen)
{
	kfree(seen);
}

/**
 * set_seen - mark a PEB as seen.
 * @ubi: UBI device description object
 * @pnum: The PEB to be marked as seen
 * @seen: integer array of @ubi->peb_count size
 */
static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
{
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return;

	seen[pnum] = 1;
}

/**
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: integer array of @ubi->peb_count size
 */
static int self_check_seen(struct ubi_device *ubi, int *seen)
{
	int pnum, ret = 0;

	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return 0;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		if (!seen[pnum] && ubi->lookuptbl[pnum]) {
			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
		sizeof(struct ubi_fm_hdr) +
		sizeof(struct ubi_fm_scan_pool) +
		sizeof(struct ubi_fm_scan_pool) +
		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
		(sizeof(struct ubi_fm_eba) +
		(ubi->peb_count * sizeof(__be32))) +
		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}

/**
 * new_fm_vhdr - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_hdr *new;

	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	new->vol_type = UBI_VID_DYNAMIC;
	new->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	new->compat = UBI_COMPAT_DELETE;

out:
	return new;
}

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new LEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->pnum = pnum;
	aeb->ec = ec;
	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates an error.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
	if (!av)
		goto out;

	av->highest_lnum = av->leb_count = av->used_ebs = 0;
	av->vol_id = vol_id;
	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	av->root = RB_ROOT;
	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = used_ebs;

	dbg_bld("found volume (ID %i)", vol_id);

	rb_link_node(&av->rb, parent, p);
	rb_insert_color(&av->rb, &ai->volumes);

out:
	return av;
}

/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the to be assigned SEB
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &av->root.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}

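/*
 * Note on ordering: add_vol() above keys the ai->volumes rb-tree with an
 * inverted comparison (greater vol_ids descend to the left). That is fine
 * as long as every lookup uses the same convention; the volume search in
 * process_pool_aeb() below does, so insertions and lookups stay consistent.
 */
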
/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = kmem_cache_alloc(ai->aeb_slab_cache,
						  GFP_KERNEL);
			if (!victim)
				return -ENOMEM;

			victim->ec = aeb->ec;
			victim->pnum = aeb->pnum;
			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}

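/*
 * update_vol() above relies on the return convention of ubi_compare_lebs()
 * (see attach.c): a negative value is an error, otherwise bit 0 is set iff
 * the second PEB - here new_aeb - holds the newer copy of the LEB. This is
 * why "cmp_res & 1" selects the replace-in-tree path.
 */
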
/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	struct ubi_ainf_volume *av, *tmp_av = NULL;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
	int found = 0;

	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
	    be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	while (*p) {
		parent = *p;
		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
			p = &(*p)->rb_left;
		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
			p = &(*p)->rb_right;
		else {
			found = 1;
			break;
		}
	}

	if (found)
		av = tmp_av;
	else {
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 *
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
		av = rb_entry(node, struct ubi_ainf_volume, rb);

		for (node2 = rb_first(&av->root); node2;
		     node2 = rb_next(node2)) {
			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				av->leb_count--;
				kmem_cache_free(ai->aeb_slab_cache, aeb);
				return;
			}
		}
	}
}

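/*
 * Background for scan_pool() below: the pools are the only part of the
 * flash that may legally change after a fastmap has been written. Any PEB
 * handed out between two fastmap writes comes from a pool, so re-reading
 * just the pool members at attach time is enough to catch up with those
 * changes without a full scan.
 */
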
/**
 * scan_pool - scans a pool for changed (i.e. no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the pool to be scanned
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
{
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		kfree(ech);
		return -ENOMEM;
	}

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);

			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);
			if (err == UBI_IO_FF_BITFLIPS)
				add_aeb(ai, free, pnum, ec, 1);
			else
				add_aeb(ai, free, pnum, ec, 0);
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
						   GFP_KERNEL);
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->ec = be64_to_cpu(ech->ec);
			new_aeb->pnum = pnum;
			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}
	}

out:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
	return ret;
}

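/*
 * count_fastmap_pebs() backs the self-check in ubi_attach_fastmap(): every
 * PEB must show up in the erase list, the free list or one of the
 * per-volume rb-trees, so the total must equal
 * ubi->peb_count - ai->bad_peb_count - fm->used_blocks.
 */
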
/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}

/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl1->size);
	wl_pool_size = be16_to_cpu(fmpl2->size);
	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
		goto fail_bad;
	}

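	/*
	 * The layout of the fastmap buffer parsed below mirrors the order in
	 * which ubi_write_fastmap() serializes it:
	 *
	 *	ubi_fm_sb | ubi_fm_hdr | pool | WL pool |
	 *	free/used/scrub/erase EC entries |
	 *	per volume: ubi_fm_volhdr + EBA table
	 *
	 * Each chunk is validated against fm_size before it is dereferenced.
	 */
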
	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (!av)
			goto fail_bad;

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			if (!aeb) {
				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
				goto fail_bad;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}
	}

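	/*
	 * The pools are processed last on purpose: a PEB sitting in a pool
	 * may carry a newer copy of a LEB than the EBA tables above, and
	 * scan_pool()/update_vol() will then replace the stale entry.
	 */
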
	ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}

	return ret;
}

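/*
 * A rough sketch of how an attach path is expected to consume the result
 * of ubi_scan_fastmap() below (illustrative only; the real caller lives in
 * attach.c):
 *
 *	err = ubi_scan_fastmap(ubi, ai, fm_anchor);
 *	if (err > 0) {
 *		// UBI_NO_FASTMAP or UBI_BAD_FASTMAP: throw the half-built
 *		// attach info away and fall back to a full scan
 *	} else if (err < 0) {
 *		// internal error, abort the attach
 *	}
 */
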
/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @fm_anchor: The fastmap starts at this PEB
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int fm_anchor)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	int i, used_blocks, pnum, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	down_write(&ubi->fm_protect);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
			used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}
	}

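	/*
	 * At this point fm_buf holds the complete fastmap (used_blocks LEBs).
	 * Note that data_crc in the on-flash superblock was computed with the
	 * data_crc field itself zeroed, so the check below clears it before
	 * recomputing the CRC over the whole fm_size buffer.
	 */
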
	kfree(fmsb);
	fmsb = NULL;

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
			tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kfree(fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;

	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
out:
	up_write(&ubi->fm_protect);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}

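/*
 * Locking in the write path below: ubi_update_fastmap() already holds
 * ubi->fm_protect, ubi->work_sem and ubi->fm_eba_sem when it calls
 * ubi_write_fastmap(). The function additionally takes volumes_lock and
 * wl_lock only while serializing the in-memory state into fm_buf and drops
 * both before any flash I/O is issued.
 */
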
/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the fastmap to be written
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	struct rb_node *tmp_rb;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;
	int *seen_pebs = NULL;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avhdr) {
		ret = -ENOMEM;
		goto out;
	}

	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvhdr) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	seen_pebs = init_seen(ubi);
	if (IS_ERR(seen_pebs)) {
		ret = PTR_ERR(seen_pebs);
		goto out_kfree;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++) {
		fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
	}

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
		fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
	}

	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}

	ubi_for_each_protected_peb(ubi, i, wl_e) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

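	/*
	 * PEBs queued for erasure are not in any of the wear-leveling trees
	 * walked above; collect them from the work queue so that they end up
	 * in the erase list of the next attach.
	 */
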
	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			set_seen(ubi, wl_e->pnum, seen_pebs);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++)
			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_kfree;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
				   new_fm->e[i]->pnum, ubi->leb_start,
				   ubi->leb_size);
		if (ret) {
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	ret = self_check_seen(ubi, seen_pebs);
	dbg_bld("fastmap written!");

out_kfree:
	ubi_free_vid_hdr(ubi, avhdr);
	ubi_free_vid_hdr(ubi, dvhdr);
	free_seen(seen_pebs);
out:
	return ret;
}

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 *
 * This function ensures that upon next UBI attach a full scan
 * is issued. We need this if UBI is about to write a new fastmap
 * but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue or b) fall back to RO mode to have the
 * current fastmap in a valid state.
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fastmap_layout *fm;
	struct ubi_wl_entry *e;
	struct ubi_vid_hdr *vh = NULL;

	if (!ubi->fm)
		return 0;

	ubi->fm = NULL;

	ret = -ENOMEM;
	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm)
		goto out;

	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vh)
		goto out_free_fm;

	ret = -ENOSPC;
	e = ubi_wl_get_fm_peb(ubi, 1);
	if (!e)
		goto out_free_fm;

	/*
	 * Create a fake fastmap such that UBI will fall back
	 * to scanning mode.
	 */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
	if (ret < 0) {
		ubi_wl_put_fm_peb(ubi, e, 0, 0);
		goto out_free_fm;
	}

	fm->used_blocks = 1;
	fm->e[0] = e;

	ubi->fm = fm;

out:
	ubi_free_vid_hdr(ubi, vh);
	return ret;

out_free_fm:
	kfree(fm);
	goto out;
}

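/*
 * Note for invalidate_fastmap() above: writing just a fresh anchor VID
 * header (with a new sequence number) but no fastmap payload is enough -
 * the next attach will find the anchor, fail to read a valid fastmap from
 * it and fall back to a full scan.
 */
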
/**
 * return_fm_pebs - returns all PEBs used by a fastmap back to the
 * WL sub-system.
 * @ubi: UBI device object
 * @fm: fastmap layout object
 */
static void return_fm_pebs(struct ubi_device *ubi,
			   struct ubi_fastmap_layout *fm)
{
	int i;

	if (!fm)
		return;

	for (i = 0; i < fm->used_blocks; i++) {
		if (fm->e[i]) {
			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
					  fm->to_be_tortured[i]);
			fm->e[i] = NULL;
		}
	}
}

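/*
 * Error strategy of ubi_update_fastmap() below: if the new fastmap cannot
 * be written, the current one is invalidated via invalidate_fastmap() so
 * that the next attach does a full scan; only if even that fails does UBI
 * switch to read-only mode.
 */
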
/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i, j;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	down_write(&ubi->fm_protect);

	ubi_refill_pools(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		up_write(&ubi->fm_protect);
		return 0;
	}

	ret = ubi_ensure_anchor_pebs(ubi);
	if (ret) {
		up_write(&ubi->fm_protect);
		return ret;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		up_write(&ubi->fm_protect);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e) {
			if (old_fm && old_fm->e[i]) {
				ret = erase_block(ubi, old_fm->e[i]->pnum);
				if (ret < 0) {
					ubi_err(ubi, "could not erase old fastmap PEB");

					for (j = 1; j < i; j++) {
						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
								  j, 0);
						new_fm->e[j] = NULL;
					}
					goto err;
				}
				new_fm->e[i] = old_fm->e[i];
				old_fm->e[i] = NULL;
			} else {
				ubi_err(ubi, "could not get any free erase block");

				for (j = 1; j < i; j++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
					new_fm->e[j] = NULL;
				}

				ret = -ENOSPC;
				goto err;
			}
		} else {
			new_fm->e[i] = tmp_e;

			if (old_fm && old_fm->e[i]) {
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
				old_fm->e[i] = NULL;
			}
		}
	}

	/* Old fastmap is larger than the new one */
	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
					  old_fm->to_be_tortured[i]);
			old_fm->e[i] = NULL;
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
				ubi_err(ubi, "could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
					new_fm->e[i] = NULL;
				}
				goto err;
			}
			new_fm->e[0] = old_fm->e[0];
			new_fm->e[0]->ec = ret;
			old_fm->e[0] = NULL;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);
			new_fm->e[0] = tmp_e;
			old_fm->e[0] = NULL;
		}
	} else {
		if (!tmp_e) {
			ubi_err(ubi, "could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++) {
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
				new_fm->e[i] = NULL;
			}

			ret = -ENOSPC;
			goto err;
		}
		new_fm->e[0] = tmp_e;
	}

	down_write(&ubi->work_sem);
	down_write(&ubi->fm_eba_sem);
	ret = ubi_write_fastmap(ubi, new_fm);
	up_write(&ubi->fm_eba_sem);
	up_write(&ubi->work_sem);

	if (ret)
		goto err;

out_unlock:
	up_write(&ubi->fm_protect);
	kfree(old_fm);
	return ret;

err:
	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);

	ret = invalidate_fastmap(ubi);
	if (ret < 0) {
		ubi_err(ubi, "Unable to invalidate current fastmap!");
		ubi_ro_mode(ubi);
	} else {
		return_fm_pebs(ubi, old_fm);
		return_fm_pebs(ubi, new_fm);
		ret = 0;
	}

	kfree(new_fm);
	goto out_unlock;
}