/*
 * Copyright (c) 2012 Linutronix GmbH
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 */

#include <linux/crc32.h>
#include "ubi.h"

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_hdr) + \
		sizeof(struct ubi_fm_scan_pool) + \
		sizeof(struct ubi_fm_scan_pool) + \
		(ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
		(sizeof(struct ubi_fm_eba) + \
		(ubi->peb_count * sizeof(__be32))) + \
		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}

/**
 * new_fm_vhdr - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_hdr *new;

	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	new->vol_type = UBI_VID_DYNAMIC;
	new->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	new->compat = UBI_COMPAT_DELETE;

out:
	return new;
}

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new PEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->pnum = pnum;
	aeb->ec = ec;
	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates an error.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
	if (!av)
		goto out;

	av->highest_lnum = av->leb_count = 0;
	av->vol_id = vol_id;
	av->used_ebs = used_ebs;
	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	av->root = RB_ROOT;

	dbg_bld("found volume (ID %i)", vol_id);

	rb_link_node(&av->rb, parent, p);
	rb_insert_color(&av->rb, &ai->volumes);

out:
	return av;
}

/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the SEB to be assigned
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	p = &av->root.rb_node;
	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}

/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = kmem_cache_alloc(ai->aeb_slab_cache,
						  GFP_KERNEL);
			if (!victim)
				return -ENOMEM;

			victim->ec = aeb->ec;
			victim->pnum = aeb->pnum;
			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size = \
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}

/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	struct ubi_ainf_volume *av, *tmp_av = NULL;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
	int found = 0;

	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
	    be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	while (*p) {
		parent = *p;
		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
			p = &(*p)->rb_left;
		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
			p = &(*p)->rb_right;
		else {
			found = 1;
			break;
		}
	}

	if (found)
		av = tmp_av;
	else {
		ubi_err("orphaned volume in fastmap pool!");
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
		av = rb_entry(node, struct ubi_ainf_volume, rb);

		for (node2 = rb_first(&av->root); node2;
		     node2 = rb_next(node2)) {
			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				kmem_cache_free(ai->aeb_slab_cache, aeb);
				return;
			}
		}
	}
}

/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the to be scanned pool
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @eba_orphans: list of PEBs which need to be scanned
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *eba_orphans, struct list_head *free)
{
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb, *tmp_aeb;
	int i, pnum, err, found_orphan, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		kfree(ech);
		return -ENOMEM;
	}

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err("bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err("unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err("bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);
			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);
			if (err == UBI_IO_FF_BITFLIPS)
				add_aeb(ai, free, pnum, ec, 1);
			else
				add_aeb(ai, free, pnum, ec, 0);
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			found_orphan = 0;
			list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
				if (tmp_aeb->pnum == pnum) {
					found_orphan = 1;
					break;
				}
			}
			if (found_orphan) {
				list_del(&tmp_aeb->u.list);
				kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
			}

			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
						   GFP_KERNEL);
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->ec = be64_to_cpu(ech->ec);
			new_aeb->pnum = pnum;
			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err("fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}
	}

out:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
	return ret;
}

/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}

/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, eba_orphans, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_ec_hdr *ech;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	INIT_LIST_HEAD(&eba_orphans);
	INIT_LIST_HEAD(&ai->corr);
	INIT_LIST_HEAD(&ai->free);
	INIT_LIST_HEAD(&ai->erase);
	INIT_LIST_HEAD(&ai->alien);
	ai->volumes = RB_ROOT;
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
					       sizeof(struct ubi_ainf_peb),
					       0, 0, NULL);
	if (!ai->aeb_slab_cache) {
		ret = -ENOMEM;
		goto fail;
	}

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl1->size);
	wl_pool_size = be16_to_cpu(fmpl2->size);
	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err("bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err("bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err("bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err("bad fastmap vol header magic: 0x%x, " \
				"expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (!av)
			goto fail_bad;

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err("bad fastmap EBA header magic: 0x%x, " \
				"expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			/* This can happen if a PEB is already in an EBA known
			 * by this fastmap but the PEB itself is not in the used
			 * list.
			 * In this case the PEB can be within the fastmap pool
			 * or while writing the fastmap it was in the protection
			 * queue.
			 */
			if (!aeb) {
				aeb = kmem_cache_alloc(ai->aeb_slab_cache,
						       GFP_KERNEL);
				if (!aeb) {
					ret = -ENOMEM;

					goto fail;
				}

				aeb->lnum = j;
				aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
				aeb->ec = -1;
				aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
				list_add_tail(&aeb->u.list, &eba_orphans);
				continue;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}

		ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
		if (!ech) {
			ret = -ENOMEM;
			goto fail;
		}

		list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
					 u.list) {
			int err;

			if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
				ubi_err("bad PEB in fastmap EBA orphan list");
				ret = UBI_BAD_FASTMAP;
				kfree(ech);
				goto fail;
			}

			err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
			if (err && err != UBI_IO_BITFLIPS) {
				ubi_err("unable to read EC header! PEB:%i " \
					"err:%i", tmp_aeb->pnum, err);
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				kfree(ech);

				goto fail;
			} else if (err == UBI_IO_BITFLIPS)
				tmp_aeb->scrub = 1;

			tmp_aeb->ec = be64_to_cpu(ech->ec);
			assign_aeb_to_av(ai, tmp_aeb, av);
		}

		kfree(ech);
	}

	ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
			&eba_orphans, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
			&eba_orphans, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	ubi_assert(list_empty(&used));
	ubi_assert(list_empty(&eba_orphans));
	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}

	return ret;
}

/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @fm_anchor: The fastmap starts at this PEB
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int fm_anchor)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	int i, used_blocks, pnum, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	mutex_lock(&ubi->fm_mutex);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err("bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err("bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
			ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err("wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err("bad fastmap anchor vol_id: 0x%x," \
					" expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err("bad fastmap data vol_id: 0x%x," \
					" expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i (PEB: %i, " \
				"err: %i)", i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err("fastmap data CRC is invalid");
		ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kfree(fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg("attached by fastmap");
	ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;

	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
out:
	mutex_unlock(&ubi->fm_mutex);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err("Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}

/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the to be written fastmap
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct rb_node *node;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avhdr) {
		ret = -ENOMEM;
		goto out;
	}

	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvhdr) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++)
		fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++)
		fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);

	for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++)
			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
	if (ret) {
		ubi_err("unable to write vid_hdr to fastmap SB!");
		goto out_kfree;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
		if (ret) {
			ubi_err("unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
			new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
		if (ret) {
			ubi_err("unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	dbg_bld("fastmap written!");

out_kfree:
	ubi_free_vid_hdr(ubi, avhdr);
	ubi_free_vid_hdr(ubi, dvhdr);
out:
	return ret;
}

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 * @fm: the fastmap to be destroyed
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi,
			      struct ubi_fastmap_layout *fm)
{
	int ret;
	struct ubi_vid_hdr *vh;

	ret = erase_block(ubi, fm->e[0]->pnum);
	if (ret < 0)
		return ret;

	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vh)
		return -ENOMEM;

	/* deleting the current fastmap SB is not enough, an old SB may exist,
	 * so create a (corrupted) SB such that fastmap will find it and fall
	 * back to scanning mode in any case */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
	ubi_free_vid_hdr(ubi, vh);

	return ret;
}

/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	mutex_lock(&ubi->fm_mutex);

	ubi_refill_pools(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		mutex_unlock(&ubi->fm_mutex);
		return 0;
	}

	ret = ubi_ensure_anchor_pebs(ubi);
	if (ret) {
		mutex_unlock(&ubi->fm_mutex);
		return ret;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		mutex_unlock(&ubi->fm_mutex);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;

	for (i = 0; i < new_fm->used_blocks; i++) {
		new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!new_fm->e[i]) {
			while (i--)
				kfree(new_fm->e[i]);

			kfree(new_fm);
			mutex_unlock(&ubi->fm_mutex);
			return -ENOMEM;
		}
	}

	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err("fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e && !old_fm) {
			int j;
			ubi_err("could not get any free erase block");

			for (j = 1; j < i; j++)
				ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);

			ret = -ENOSPC;
			goto err;
		} else if (!tmp_e && old_fm) {
			ret = erase_block(ubi, old_fm->e[i]->pnum);
			if (ret < 0) {
				int j;

				for (j = 1; j < i; j++)
					ubi_wl_put_fm_peb(ubi, new_fm->e[j],
							  j, 0);

				ubi_err("could not erase old fastmap PEB");
				goto err;
			}

			new_fm->e[i]->pnum = old_fm->e[i]->pnum;
			new_fm->e[i]->ec = old_fm->e[i]->ec;
		} else {
			new_fm->e[i]->pnum = tmp_e->pnum;
			new_fm->e[i]->ec = tmp_e->ec;

			if (old_fm)
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
				int i;
				ubi_err("could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++)
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
				goto err;
			}

			new_fm->e[0]->pnum = old_fm->e[0]->pnum;
			new_fm->e[0]->ec = ret;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);

			new_fm->e[0]->pnum = tmp_e->pnum;
			new_fm->e[0]->ec = tmp_e->ec;
		}
	} else {
		if (!tmp_e) {
			int i;
			ubi_err("could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++)
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);

			ret = -ENOSPC;
			goto err;
		}

		new_fm->e[0]->pnum = tmp_e->pnum;
		new_fm->e[0]->ec = tmp_e->ec;
	}

	down_write(&ubi->work_sem);
	down_write(&ubi->fm_sem);
	ret = ubi_write_fastmap(ubi, new_fm);
	up_write(&ubi->fm_sem);
	up_write(&ubi->work_sem);

	if (ret)
		goto err;

out_unlock:
	mutex_unlock(&ubi->fm_mutex);
	kfree(old_fm);
	return ret;

err:
	kfree(new_fm);

ubi_warn("Unable to write new fastmap, err=%i", ret); 1557 1558 ret = 0; 1559 if (old_fm) { 1560 ret = invalidate_fastmap(ubi, old_fm); 1561 if (ret < 0) 1562 ubi_err("Unable to invalidiate current fastmap!"); 1563 else if (ret) 1564 ret = 0; 1565 } 1566 goto out_unlock; 1567 } 1568