// SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
/*
 * Copyright (c) Thomas Gleixner <tglx@linutronix.de>
 *
 * The parts taken from the kernel implementation are:
 *
 * Copyright (c) International Business Machines Corp., 2006
 */

#include <common.h>
#include <errno.h>
#include <ubispl.h>

#include <linux/crc32.h>

#include "ubispl.h"

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
static size_t ubi_calc_fm_size(struct ubi_scan_info *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
		sizeof(struct ubi_fm_hdr) +
		sizeof(struct ubi_fm_scan_pool) +
		sizeof(struct ubi_fm_scan_pool) +
		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
		(sizeof(struct ubi_fm_eba) +
		(ubi->peb_count * sizeof(__be32))) +
		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}

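/**
 * ubi_io_read - read data from a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer to store the read data
 * @pnum: physical eraseblock number, relative to the UBI partition
 * @from: offset within the physical eraseblock
 * @len: number of bytes to read
 *
 * Delegates to the flash read callback supplied by the caller, adding the
 * configured PEB offset to @pnum.
 */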
static int ubi_io_read(struct ubi_scan_info *ubi, void *buf, int pnum,
		       unsigned long from, unsigned long len)
{
	return ubi->read(pnum + ubi->peb_offset, from, len, buf);
}

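/**
 * ubi_io_is_bad - check whether a physical eraseblock number is out of range.
 * @ubi: UBI device description object
 * @peb: physical eraseblock number to check
 */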
static int ubi_io_is_bad(struct ubi_scan_info *ubi, int peb)
{
	return peb >= ubi->peb_count || peb < 0;
}

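/**
 * ubi_io_read_vid_hdr - read and validate the VID header of a block.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number
 * @vh: buffer for the volume identifier header
 * @unused: kept for interface compatibility with the kernel implementation
 *
 * Skips blocks already marked corrupt or already scanned, checks the magic
 * number and the header CRC and flags the block as corrupt on failure.
 */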
static int ubi_io_read_vid_hdr(struct ubi_scan_info *ubi, int pnum,
			       struct ubi_vid_hdr *vh, int unused)
{
	u32 magic;
	int res;

	/* No point in rescanning a corrupt block */
	if (test_bit(pnum, ubi->corrupt))
		return UBI_IO_BAD_HDR;
	/*
	 * If the block has been scanned already, no need to rescan
	 */
	if (test_and_set_bit(pnum, ubi->scanned))
		return 0;

	res = ubi_io_read(ubi, vh, pnum, ubi->vid_offset, sizeof(*vh));

	/*
	 * Bad block, unrecoverable ECC error, skip the block
	 */
	if (res) {
		ubi_dbg("Skipping bad or unreadable block %d", pnum);
		vh->magic = 0;
		generic_set_bit(pnum, ubi->corrupt);
		return res;
	}

	/* Magic number available ? */
	magic = be32_to_cpu(vh->magic);
	if (magic != UBI_VID_HDR_MAGIC) {
		generic_set_bit(pnum, ubi->corrupt);
		if (magic == 0xffffffff)
			return UBI_IO_FF;
		ubi_msg("Bad magic in block %d %08x", pnum, magic);
		return UBI_IO_BAD_HDR;
	}

	/* Header CRC correct ? */
	if (crc32(UBI_CRC32_INIT, vh, UBI_VID_HDR_SIZE_CRC) !=
	    be32_to_cpu(vh->hdr_crc)) {
		ubi_msg("Bad CRC in block %d", pnum);
		generic_set_bit(pnum, ubi->corrupt);
		return UBI_IO_BAD_HDR;
	}

	ubi_dbg("RV: pnum: %i sqnum %llu", pnum, be64_to_cpu(vh->sqnum));

	return 0;
}

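/**
 * ubi_rescan_fm_vid_hdr - reread and check a VID header referenced by fastmap.
 * @ubi: UBI device description object
 * @vh: buffer for the volume identifier header
 * @fm_pnum: physical eraseblock number taken from the fastmap
 * @fm_vol_id: volume id expected by the fastmap
 * @fm_lnum: logical eraseblock number expected by the fastmap
 *
 * Rereads the VID header of @fm_pnum and compares volume id, volume type and
 * logical eraseblock number against the fastmap data. A mismatch is only
 * reported via debug output.
 */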
static int ubi_rescan_fm_vid_hdr(struct ubi_scan_info *ubi,
				 struct ubi_vid_hdr *vh,
				 u32 fm_pnum, u32 fm_vol_id, u32 fm_lnum)
{
	int res;

	if (ubi_io_is_bad(ubi, fm_pnum))
		return -EINVAL;

	res = ubi_io_read_vid_hdr(ubi, fm_pnum, vh, 0);
	if (!res) {
		/* Check volume id, volume type and lnum */
		if (be32_to_cpu(vh->vol_id) == fm_vol_id &&
		    vh->vol_type == UBI_VID_STATIC &&
		    be32_to_cpu(vh->lnum) == fm_lnum)
			return 0;
		ubi_dbg("RS: PEB %u vol: %u : %u typ %u lnum %u %u",
			fm_pnum, fm_vol_id, vh->vol_type,
			be32_to_cpu(vh->vol_id),
			fm_lnum, be32_to_cpu(vh->lnum));
	}
	return res;
}

/* Insert the logical block into the volume info */
static int ubi_add_peb_to_vol(struct ubi_scan_info *ubi,
			      struct ubi_vid_hdr *vh, u32 vol_id,
			      u32 pnum, u32 lnum)
{
	struct ubi_vol_info *vi = ubi->volinfo + vol_id;
	u32 *ltp;

	/*
	 * If the volume is larger than expected, yell and give up :(
	 */
	if (lnum >= UBI_MAX_VOL_LEBS) {
		ubi_warn("Vol: %u LEB %d > %d", vol_id, lnum, UBI_MAX_VOL_LEBS);
		return -EINVAL;
	}

	ubi_dbg("SC: Add PEB %u to Vol %u as LEB %u fnd %d sc %d",
		pnum, vol_id, lnum, !!test_bit(lnum, vi->found),
		!!test_bit(pnum, ubi->scanned));

	/* Points to the translation entry */
	ltp = vi->lebs_to_pebs + lnum;

	/* If the block is already assigned, check sqnum */
	if (__test_and_set_bit(lnum, vi->found)) {
		u32 cur_pnum = *ltp;
		struct ubi_vid_hdr *cur = ubi->blockinfo + cur_pnum;

		/*
		 * If the current block has not yet been scanned, we
		 * need to do that. The other block might be stale or
		 * the current block corrupted and the FM not yet
		 * updated.
		 */
		if (!test_bit(cur_pnum, ubi->scanned)) {
			/*
			 * If the scan fails, we use the valid block
			 */
			if (ubi_rescan_fm_vid_hdr(ubi, cur, cur_pnum, vol_id,
						  lnum)) {
				*ltp = pnum;
				return 0;
			}
		}

		/*
		 * Should not happen ....
		 */
		if (test_bit(cur_pnum, ubi->corrupt)) {
			*ltp = pnum;
			return 0;
		}

		ubi_dbg("Vol %u LEB %u PEB %u->sqnum %llu NPEB %u->sqnum %llu",
			vol_id, lnum, cur_pnum, be64_to_cpu(cur->sqnum), pnum,
			be64_to_cpu(vh->sqnum));

		/*
		 * Compare sqnum and take the newer one
		 */
		if (be64_to_cpu(cur->sqnum) < be64_to_cpu(vh->sqnum))
			*ltp = pnum;
	} else {
		*ltp = pnum;
		if (lnum > vi->last_block)
			vi->last_block = lnum;
	}

	return 0;
}

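/**
 * ubi_scan_vid_hdr - scan a physical eraseblock and sort it into a volume.
 * @ubi: UBI device description object
 * @vh: buffer for the volume identifier header
 * @pnum: physical eraseblock number to scan
 *
 * Returns UBI_FASTMAP_ANCHOR if the block belongs to the fastmap anchor
 * volume and fastmap is enabled. Static volumes marked in the toload mask
 * are added to the volume info via ubi_add_peb_to_vol().
 */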
static int ubi_scan_vid_hdr(struct ubi_scan_info *ubi, struct ubi_vid_hdr *vh,
			    u32 pnum)
{
	u32 vol_id, lnum;
	int res;

	if (ubi_io_is_bad(ubi, pnum))
		return -EINVAL;

	res = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
	if (res)
		return res;

	/* Get volume id */
	vol_id = be32_to_cpu(vh->vol_id);

	/* If this is the fastmap anchor, return right away */
	if (vol_id == UBI_FM_SB_VOLUME_ID)
		return ubi->fm_enabled ? UBI_FASTMAP_ANCHOR : 0;

	/* We only care about static volumes with an id < UBI_SPL_VOL_IDS */
	if (vol_id >= UBI_SPL_VOL_IDS || vh->vol_type != UBI_VID_STATIC)
		return 0;

	/* We are only interested in the volumes to load */
	if (!test_bit(vol_id, ubi->toload))
		return 0;

	lnum = be32_to_cpu(vh->lnum);
	return ubi_add_peb_to_vol(ubi, vh, vol_id, pnum, lnum);
}

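/**
 * assign_aeb_to_av - assign a PEB referenced by the fastmap to a volume.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number
 * @lnum: logical eraseblock number
 * @vol_id: volume id
 * @vol_type: volume type
 * @used: number of used logical eraseblocks in the volume
 *
 * Only static volumes marked in the toload mask are of interest; for those
 * the VID header is scanned via ubi_scan_vid_hdr().
 */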
static int assign_aeb_to_av(struct ubi_scan_info *ubi, u32 pnum, u32 lnum,
			    u32 vol_id, u32 vol_type, u32 used)
{
	struct ubi_vid_hdr *vh;

	if (ubi_io_is_bad(ubi, pnum))
		return -EINVAL;

	ubi->fastmap_pebs++;

	if (vol_id >= UBI_SPL_VOL_IDS || vol_type != UBI_STATIC_VOLUME)
		return 0;

	/* We are only interested in the volumes to load */
	if (!test_bit(vol_id, ubi->toload))
		return 0;

	vh = ubi->blockinfo + pnum;

	return ubi_scan_vid_hdr(ubi, vh, pnum);
}

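/**
 * scan_pool - scan the PEBs of a fastmap pool.
 * @ubi: UBI device description object
 * @pebs: array of physical eraseblock numbers (big endian)
 * @pool_size: number of entries in @pebs
 *
 * Pool members may carry data newer than the fastmap itself, so each of them
 * is scanned like a regular block. Scan failures of individual blocks are
 * tolerated; only an out of range PEB number is fatal.
 */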
static int scan_pool(struct ubi_scan_info *ubi, __be32 *pebs, int pool_size)
{
	struct ubi_vid_hdr *vh;
	u32 pnum;
	int i;

	ubi_dbg("Scanning pool size: %d", pool_size);

	for (i = 0; i < pool_size; i++) {
		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err("FM: Bad PEB in fastmap pool! %u", pnum);
			return UBI_BAD_FASTMAP;
		}

		vh = ubi->blockinfo + pnum;
		/*
		 * We allow the scan to fail here. The loader will notice
		 * and look for a replacement.
		 */
		ubi_scan_vid_hdr(ubi, vh, pnum);
	}
	return 0;
}

/*
 * Fastmap code is stolen from the Linux kernel and this stub structure is
 * used to make it happy.
 */
struct ubi_attach_info {
	int i;
};

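/**
 * ubi_attach_fastmap - parse the in-memory fastmap and populate volume info.
 * @ubi: UBI device description object
 * @ai: unused stub, kept to stay close to the kernel interface
 * @fm: fastmap layout information to fill in
 *
 * Walks the fastmap data in ubi->fm_buf: header, pools, erase counter lists
 * and the per-volume EBA tables. Returns 0 on success, UBI_BAD_FASTMAP if
 * the fastmap is inconsistent.
 */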
static int ubi_attach_fastmap(struct ubi_scan_info *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	void *fm_raw = ubi->fm_buf;

	memset(ubi->fm_used, 0, sizeof(ubi->fm_used));

	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl1->size);
	wl_pool_size = be16_to_cpu(fmpl2->size);
	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err("bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err("bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err("bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		generic_set_bit(be32_to_cpu(fmec->pnum), ubi->fm_used);
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;
	}

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		u32 vol_id, vol_type, used, reserved;

		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err("bad fastmap vol header magic: 0x%x, " \
				"expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		vol_id = be32_to_cpu(fmvhdr->vol_id);
		vol_type = fmvhdr->vol_type;
		used = be32_to_cpu(fmvhdr->used_ebs);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err("bad fastmap EBA header magic: 0x%x, " \
				"expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		reserved = be32_to_cpu(fm_eba->reserved_pebs);
		ubi_dbg("FA: vol %u used %u res: %u", vol_id, used, reserved);
		for (j = 0; j < reserved; j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
				continue;

			if (!__test_and_clear_bit(pnum, ubi->fm_used))
				continue;

			/*
			 * We only handle static volumes so used_ebs
			 * needs to be handed in. And we do not assign
			 * the reserved blocks
			 */
			if (j >= used)
				continue;

			ret = assign_aeb_to_av(ubi, pnum, j, vol_id,
					       vol_type, used);
			if (!ret)
				continue;

			/*
			 * Nasty: The fastmap claims that the volume
			 * has one more block than it really does, but
			 * that block is always empty and the other
			 * blocks have the correct number of total LEBs
			 * in the headers. Deal with it.
			 */
			if (ret != UBI_IO_FF && j != used - 1)
				goto fail_bad;
			ubi_dbg("FA: Vol: %u Ignoring empty LEB %d of %d",
				vol_id, j, used);
		}
	}

	ret = scan_pool(ubi, fmpl1->pebs, pool_size);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, fmpl2->pebs, wl_pool_size);
	if (ret)
		goto fail;

#ifdef CHECKME
	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;
#endif

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	return ret;
}

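/**
 * ubi_scan_fastmap - read, verify and attach the fastmap.
 * @ubi: UBI device description object
 * @ai: unused stub, kept to stay close to the kernel interface
 * @fm_anchor: physical eraseblock number of the fastmap anchor
 *
 * Reads the fastmap superblock from the anchor block, reads all fastmap
 * blocks into ubi->fm_buf, verifies the data CRC and hands the buffer to
 * ubi_attach_fastmap(). Returns 0 on success, UBI_BAD_FASTMAP or a negative
 * error code otherwise.
 */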
static int ubi_scan_fastmap(struct ubi_scan_info *ubi,
			    struct ubi_attach_info *ai,
			    int fm_anchor)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_fastmap_layout *fm;
	int i, used_blocks, pnum, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	fmsb = &ubi->fm_sb;
	fm = &ubi->fm_layout;

	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err("bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err("bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
			ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	vh = &ubi->fm_vh;

	for (i = 0; i < used_blocks; i++) {
		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

#ifdef LATER
		int image_seq;
		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;
		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err("wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}
#endif
		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		/*
		 * Mainline code rescans the anchor header. We've done
		 * that already so we merely copy it over.
		 */
		if (pnum == fm_anchor)
			memcpy(vh, ubi->blockinfo + pnum, sizeof(*fm));

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err("bad fastmap anchor vol_id: 0x%x," \
					" expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err("bad fastmap data vol_id: 0x%x," \
					" expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i (PEB: %i, " \
				"err: %i)", i, pnum, ret);
			goto free_hdr;
		}
	}

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err("fastmap data CRC is invalid");
		ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg("attached by fastmap %uMB %u blocks",
		ubi->fsize_mb, ubi->peb_count);
	ubi_dbg("fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_dbg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);

out:
	if (ret)
		ubi_err("Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
free_fm_sb:
	goto out;
}

/*
 * Scan the flash and attempt to attach via fastmap
 */
static void ipl_scan(struct ubi_scan_info *ubi)
{
	unsigned int pnum;
	int res;

	/*
	 * Scan first for the fastmap super block
	 */
	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
		res = ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum);
		/*
		 * We ignore errors here as we are merely scanning
		 * the headers.
		 */
		if (res != UBI_FASTMAP_ANCHOR)
			continue;

		/*
		 * If fastmap is disabled, continue scanning. This
		 * might happen because the previous attempt failed or
		 * the caller disabled it right away.
		 */
		if (!ubi->fm_enabled)
			continue;

		/*
		 * Try to attach the fastmap, if that fails continue
		 * scanning.
		 */
		if (!ubi_scan_fastmap(ubi, NULL, pnum))
			return;
		/*
		 * Fastmap failed. Clear everything we have and start
		 * over. We are paranoid and do not trust anything.
		 */
		memset(ubi->volinfo, 0, sizeof(ubi->volinfo));
		pnum = 0;
		break;
	}

	/*
	 * Continue scanning, ignore errors, we might find what we are
	 * looking for.
	 */
	for (; pnum < ubi->peb_count; pnum++)
		ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum);
}

/*
 * Load a logical block of a volume into memory
 */
static int ubi_load_block(struct ubi_scan_info *ubi, uint8_t *laddr,
			  struct ubi_vol_info *vi, u32 vol_id, u32 lnum,
			  u32 last)
{
	struct ubi_vid_hdr *vh, *vrepl;
	u32 pnum, crc, dlen;

retry:
	/*
	 * If this is a fastmap run, we try to rescan full, otherwise
	 * we simply give up.
	 */
	if (!test_bit(lnum, vi->found)) {
		ubi_warn("LEB %d of %d is missing", lnum, last);
		return -EINVAL;
	}

	pnum = vi->lebs_to_pebs[lnum];

	ubi_dbg("Load vol %u LEB %u PEB %u", vol_id, lnum, pnum);

	if (ubi_io_is_bad(ubi, pnum)) {
		ubi_warn("Corrupted mapping block %d PB %d\n", lnum, pnum);
		return -EINVAL;
	}

	if (test_bit(pnum, ubi->corrupt))
		goto find_other;

	/*
	 * Lets try to read that block
	 */
	vh = ubi->blockinfo + pnum;

	if (!test_bit(pnum, ubi->scanned)) {
		ubi_warn("Vol: %u LEB %u PEB %u not yet scanned", vol_id,
			 lnum, pnum);
		if (ubi_rescan_fm_vid_hdr(ubi, vh, pnum, vol_id, lnum))
			goto find_other;
	}

	/*
	 * Check, if the total number of blocks is correct
	 */
	if (be32_to_cpu(vh->used_ebs) != last) {
		ubi_dbg("Block count mismatch.");
		ubi_dbg("vh->used_ebs: %d nrblocks: %d",
			be32_to_cpu(vh->used_ebs), last);
		generic_set_bit(pnum, ubi->corrupt);
		goto find_other;
	}

	/*
	 * Get the data length of this block.
	 */
	dlen = be32_to_cpu(vh->data_size);

	/*
	 * Read the data into RAM. We ignore the return value
	 * here as the only thing which might go wrong are
	 * bitflips. Try nevertheless.
	 */
	ubi_io_read(ubi, laddr, pnum, ubi->leb_start, dlen);

	/* Calculate CRC over the data */
	crc = crc32(UBI_CRC32_INIT, laddr, dlen);

	if (crc != be32_to_cpu(vh->data_crc)) {
		ubi_warn("Vol: %u LEB %u PEB %u data CRC failure", vol_id,
			 lnum, pnum);
		generic_set_bit(pnum, ubi->corrupt);
		goto find_other;
	}

	/* We are good. Return the data length we read */
	return dlen;

find_other:
	ubi_dbg("Find replacement for LEB %u PEB %u", lnum, pnum);
	generic_clear_bit(lnum, vi->found);
	vrepl = NULL;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		struct ubi_vid_hdr *tmp = ubi->blockinfo + pnum;
		u32 t_vol_id = be32_to_cpu(tmp->vol_id);
		u32 t_lnum = be32_to_cpu(tmp->lnum);

		if (test_bit(pnum, ubi->corrupt))
			continue;

		if (t_vol_id != vol_id || t_lnum != lnum)
			continue;

		if (!test_bit(pnum, ubi->scanned)) {
			ubi_warn("Vol: %u LEB %u PEB %u not yet scanned",
				 vol_id, lnum, pnum);
			if (ubi_rescan_fm_vid_hdr(ubi, tmp, pnum, vol_id, lnum))
				continue;
		}

		/*
		 * We found one. If it's the first, assign it, otherwise
		 * compare the sqnum
		 */
		generic_set_bit(lnum, vi->found);

		if (!vrepl) {
			vrepl = tmp;
			continue;
		}

		if (be64_to_cpu(vrepl->sqnum) < be64_to_cpu(tmp->sqnum))
			vrepl = tmp;
	}

	if (vrepl) {
		/* Update the vi table */
		pnum = vrepl - ubi->blockinfo;
		vi->lebs_to_pebs[lnum] = pnum;
		ubi_dbg("Trying PEB %u for LEB %u", pnum, lnum);
		vh = vrepl;
	}
	goto retry;
}

/*
 * Load a volume into RAM
 */
static int ipl_load(struct ubi_scan_info *ubi, const u32 vol_id, uint8_t *laddr)
{
	struct ubi_vol_info *vi;
	u32 lnum, last, len;

	if (vol_id >= UBI_SPL_VOL_IDS)
		return -EINVAL;

	len = 0;
	vi = ubi->volinfo + vol_id;
	last = vi->last_block + 1;

	/* Read the blocks to RAM, check CRC */
	for (lnum = 0 ; lnum < last; lnum++) {
		int res = ubi_load_block(ubi, laddr, vi, vol_id, lnum, last);

		if (res < 0) {
			ubi_warn("Failed to load volume %u", vol_id);
			return res;
		}
		/* res is the data length of the read block */
		laddr += res;
		len += res;
	}
	return len;
}

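/**
 * ubispl_load_volumes - entry point of the SPL UBI loader.
 * @info: flash geometry, read callback and fastmap setting from the caller
 * @lvols: array describing the volumes to load and their load addresses
 * @nrvols: number of entries in @lvols
 *
 * Initializes the scan info, scans the flash (via fastmap if possible) and
 * loads the requested volumes. If loading fails while fastmap was in use,
 * the whole procedure is retried with fastmap disabled.
 */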
int ubispl_load_volumes(struct ubispl_info *info, struct ubispl_load *lvols,
			int nrvols)
{
	struct ubi_scan_info *ubi = info->ubi;
	int res, i, fastmap = info->fastmap;
	u32 fsize;

retry:
	/*
	 * We do a partial initialization of @ubi. Cleaning fm_buf is
	 * not necessary.
	 */
	memset(ubi, 0, offsetof(struct ubi_scan_info, fm_buf));

	ubi->read = info->read;

	/* Precalculate the offsets */
	ubi->vid_offset = info->vid_offset;
	ubi->leb_start = info->leb_start;
	ubi->leb_size = info->peb_size - ubi->leb_start;
	ubi->peb_count = info->peb_count;
	ubi->peb_offset = info->peb_offset;

	fsize = info->peb_size * info->peb_count;
	ubi->fsize_mb = fsize >> 20;

	/* Fastmap init */
	ubi->fm_size = ubi_calc_fm_size(ubi);
	ubi->fm_enabled = fastmap;

	for (i = 0; i < nrvols; i++) {
		struct ubispl_load *lv = lvols + i;

		generic_set_bit(lv->vol_id, ubi->toload);
	}

	ipl_scan(ubi);

	for (i = 0; i < nrvols; i++) {
		struct ubispl_load *lv = lvols + i;

		ubi_msg("Loading VolId #%d", lv->vol_id);
		res = ipl_load(ubi, lv->vol_id, lv->load_addr);
		if (res < 0) {
			if (fastmap) {
				fastmap = 0;
				goto retry;
			}
			ubi_warn("Failed");
			return res;
		}
	}
	return 0;
}