xref: /openbmc/u-boot/drivers/mtd/ubispl/ubispl.c (revision e8f80a5a)
1 // SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
2 /*
3  * Copyright (c) Thomas Gleixner <tglx@linutronix.de>
4  *
5  * The parts taken from the kernel implementation are:
6  *
7  * Copyright (c) International Business Machines Corp., 2006
8  */
9 
10 #include <common.h>
11 #include <errno.h>
12 #include <ubispl.h>
13 
14 #include <linux/crc32.h>
15 
16 #include "ubispl.h"
17 
18 /**
19  * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
20  * @ubi: UBI device description object
21  */
22 static size_t ubi_calc_fm_size(struct ubi_scan_info *ubi)
23 {
24 	size_t size;
25 
26 	size = sizeof(struct ubi_fm_sb) +
27 		sizeof(struct ubi_fm_hdr) +
28 		sizeof(struct ubi_fm_scan_pool) +
29 		sizeof(struct ubi_fm_scan_pool) +
30 		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
31 		(sizeof(struct ubi_fm_eba) +
32 		(ubi->peb_count * sizeof(__be32))) +
33 		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
34 	return roundup(size, ubi->leb_size);
35 }
36 
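/*
 * Read @len bytes at offset @from of physical eraseblock @pnum into @buf
 * via the flash read callback supplied by the caller. @peb_offset
 * translates the UBI-relative PEB number into an absolute block number
 * on the device.
 */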
37 static int ubi_io_read(struct ubi_scan_info *ubi, void *buf, int pnum,
38 		       unsigned long from, unsigned long len)
39 {
40 	return ubi->read(pnum + ubi->peb_offset, from, len, buf);
41 }
42 
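/*
 * A PEB is rejected only if its number lies outside the configured
 * PEB range of this UBI device.
 */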
43 static int ubi_io_is_bad(struct ubi_scan_info *ubi, int peb)
44 {
45 	return peb >= ubi->peb_count || peb < 0;
46 }
47 
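/*
 * Read and validate the VID header of physical eraseblock @pnum. The
 * result is cached via the @scanned and @corrupt bitmaps so each block
 * is read at most once. Returns 0 on success, UBI_IO_FF for an erased
 * block, UBI_IO_BAD_HDR for a corrupted header, or the error code
 * returned by the read callback.
 */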
48 static int ubi_io_read_vid_hdr(struct ubi_scan_info *ubi, int pnum,
49 			       struct ubi_vid_hdr *vh, int unused)
50 {
51 	u32 magic;
52 	int res;
53 
54 	/* No point in rescanning a corrupt block */
55 	if (test_bit(pnum, ubi->corrupt))
56 		return UBI_IO_BAD_HDR;
57 	/*
58 	 * If the block has been scanned already, no need to rescan
59 	 */
60 	if (test_and_set_bit(pnum, ubi->scanned))
61 		return 0;
62 
63 	res = ubi_io_read(ubi, vh, pnum, ubi->vid_offset, sizeof(*vh));
64 
65 	/*
66 	 * Bad block, unrecoverable ECC error, skip the block
67 	 */
68 	if (res) {
69 		ubi_dbg("Skipping bad or unreadable block %d", pnum);
70 		vh->magic = 0;
71 		generic_set_bit(pnum, ubi->corrupt);
72 		return res;
73 	}
74 
75 	/* Magic number available ? */
76 	magic = be32_to_cpu(vh->magic);
77 	if (magic != UBI_VID_HDR_MAGIC) {
78 		generic_set_bit(pnum, ubi->corrupt);
79 		if (magic == 0xffffffff)
80 			return UBI_IO_FF;
81 		ubi_msg("Bad magic in block 0%d %08x", pnum, magic);
82 		return UBI_IO_BAD_HDR;
83 	}
84 
85 	/* Header CRC correct ? */
86 	if (crc32(UBI_CRC32_INIT, vh, UBI_VID_HDR_SIZE_CRC) !=
87 	    be32_to_cpu(vh->hdr_crc)) {
88 		ubi_msg("Bad CRC in block 0%d", pnum);
89 		generic_set_bit(pnum, ubi->corrupt);
90 		return UBI_IO_BAD_HDR;
91 	}
92 
93 	ubi_dbg("RV: pnum: %i sqnum %llu", pnum, be64_to_cpu(vh->sqnum));
94 
95 	return 0;
96 }
97 
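/*
 * Re-read the VID header of a PEB referenced by the fastmap and verify
 * that volume id, volume type and LEB number match what the fastmap
 * recorded for it.
 */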
98 static int ubi_rescan_fm_vid_hdr(struct ubi_scan_info *ubi,
99 				 struct ubi_vid_hdr *vh,
100 				 u32 fm_pnum, u32 fm_vol_id, u32 fm_lnum)
101 {
102 	int res;
103 
104 	if (ubi_io_is_bad(ubi, fm_pnum))
105 		return -EINVAL;
106 
107 	res = ubi_io_read_vid_hdr(ubi, fm_pnum, vh, 0);
108 	if (!res) {
109 		/* Check volume id, volume type and lnum */
110 		if (be32_to_cpu(vh->vol_id) == fm_vol_id &&
111 		    vh->vol_type == UBI_VID_STATIC &&
112 		    be32_to_cpu(vh->lnum) == fm_lnum)
113 			return 0;
114 		ubi_dbg("RS: PEB %u vol: %u : %u typ %u lnum %u %u",
115 			fm_pnum, fm_vol_id, vh->vol_type,
116 			be32_to_cpu(vh->vol_id),
117 			fm_lnum, be32_to_cpu(vh->lnum));
118 	}
119 	return res;
120 }
121 
122 /* Insert the logical block into the volume info */
123 static int ubi_add_peb_to_vol(struct ubi_scan_info *ubi,
124 			      struct ubi_vid_hdr *vh, u32 vol_id,
125 			      u32 pnum, u32 lnum)
126 {
127 	struct ubi_vol_info *vi = ubi->volinfo + vol_id;
128 	u32 *ltp;
129 
130 	/*
131 	 * If the volume is larger than expected, yell and give up :(
132 	 */
133 	if (lnum >= UBI_MAX_VOL_LEBS) {
134 		ubi_warn("Vol: %u LEB %d > %d", vol_id, lnum, UBI_MAX_VOL_LEBS);
135 		return -EINVAL;
136 	}
137 
138 	ubi_dbg("SC: Add PEB %u to Vol %u as LEB %u fnd %d sc %d",
139 		pnum, vol_id, lnum, !!test_bit(lnum, vi->found),
140 		!!test_bit(pnum, ubi->scanned));
141 
142 	/* Points to the translation entry */
143 	ltp = vi->lebs_to_pebs + lnum;
144 
145 	/* If the block is already assigned, check sqnum */
146 	if (__test_and_set_bit(lnum, vi->found)) {
147 		u32 cur_pnum = *ltp;
148 		struct ubi_vid_hdr *cur = ubi->blockinfo + cur_pnum;
149 
150 		/*
151 		 * If the current block has not yet been scanned, we
152 		 * need to do that. The other block might be stale or
153 		 * the current block corrupted and the FM not yet
154 		 * updated.
155 		 */
156 		if (!test_bit(cur_pnum, ubi->scanned)) {
157 			/*
158 			 * If the scan fails, we use the valid block
159 			 */
160 			if (ubi_rescan_fm_vid_hdr(ubi, cur, cur_pnum, vol_id,
161 						  lnum)) {
162 				*ltp = pnum;
163 				return 0;
164 			}
165 		}
166 
167 		/*
168 		 * Should not happen ....
169 		 */
170 		if (test_bit(cur_pnum, ubi->corrupt)) {
171 			*ltp = pnum;
172 			return 0;
173 		}
174 
175 		ubi_dbg("Vol %u LEB %u PEB %u->sqnum %llu NPEB %u->sqnum %llu",
176 			vol_id, lnum, cur_pnum, be64_to_cpu(cur->sqnum), pnum,
177 			be64_to_cpu(vh->sqnum));
178 
179 		/*
180 		 * Compare sqnum and take the newer one
181 		 */
182 		if (be64_to_cpu(cur->sqnum) < be64_to_cpu(vh->sqnum))
183 			*ltp = pnum;
184 	} else {
185 		*ltp = pnum;
186 		if (lnum > vi->last_block)
187 			vi->last_block = lnum;
188 	}
189 
190 	return 0;
191 }
192 
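/*
 * Scan the VID header of physical eraseblock @pnum. The fastmap anchor
 * is reported via UBI_FASTMAP_ANCHOR if fastmap is enabled. For static
 * volumes we have been asked to load, the LEB to PEB mapping is
 * recorded; everything else is ignored.
 */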
193 static int ubi_scan_vid_hdr(struct ubi_scan_info *ubi, struct ubi_vid_hdr *vh,
194 			    u32 pnum)
195 {
196 	u32 vol_id, lnum;
197 	int res;
198 
199 	if (ubi_io_is_bad(ubi, pnum))
200 		return -EINVAL;
201 
202 	res = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
203 	if (res)
204 		return res;
205 
206 	/* Get volume id */
207 	vol_id = be32_to_cpu(vh->vol_id);
208 
209 	/* If this is the fastmap anchor, return right away */
210 	if (vol_id == UBI_FM_SB_VOLUME_ID)
211 		return ubi->fm_enabled ? UBI_FASTMAP_ANCHOR : 0;
212 
213 	/* We only care about static volumes with an id < UBI_SPL_VOL_IDS */
214 	if (vol_id >= UBI_SPL_VOL_IDS || vh->vol_type != UBI_VID_STATIC)
215 		return 0;
216 
217 	/* We are only interested in the volumes to load */
218 	if (!test_bit(vol_id, ubi->toload))
219 		return 0;
220 
221 	lnum = be32_to_cpu(vh->lnum);
222 	return ubi_add_peb_to_vol(ubi, vh, vol_id, pnum, lnum);
223 }
224 
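/*
 * Called for every PEB listed in the fastmap EBA tables. Counts the
 * fastmap PEBs and, for static volumes we want to load, scans the
 * block's VID header to fill in the LEB to PEB mapping.
 */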
225 static int assign_aeb_to_av(struct ubi_scan_info *ubi, u32 pnum, u32 lnum,
226 			     u32 vol_id, u32 vol_type, u32 used)
227 {
228 	struct ubi_vid_hdr *vh;
229 
230 	if (ubi_io_is_bad(ubi, pnum))
231 		return -EINVAL;
232 
233 	ubi->fastmap_pebs++;
234 
235 	if (vol_id >= UBI_SPL_VOL_IDS || vol_type != UBI_STATIC_VOLUME)
236 		return 0;
237 
238 	/* We are only interested in the volumes to load */
239 	if (!test_bit(vol_id, ubi->toload))
240 		return 0;
241 
242 	vh = ubi->blockinfo + pnum;
243 
244 	return ubi_scan_vid_hdr(ubi, vh, pnum);
245 }
246 
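/*
 * Scan all PEBs of a fastmap pool. Pool blocks may have been written
 * after the fastmap itself, so their VID headers have to be read from
 * flash rather than taken from the fastmap data.
 */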
247 static int scan_pool(struct ubi_scan_info *ubi, __be32 *pebs, int pool_size)
248 {
249 	struct ubi_vid_hdr *vh;
250 	u32 pnum;
251 	int i;
252 
253 	ubi_dbg("Scanning pool size: %d", pool_size);
254 
255 	for (i = 0; i < pool_size; i++) {
256 		pnum = be32_to_cpu(pebs[i]);
257 
258 		if (ubi_io_is_bad(ubi, pnum)) {
259 			ubi_err("FM: Bad PEB in fastmap pool! %u", pnum);
260 			return UBI_BAD_FASTMAP;
261 		}
262 
263 		vh = ubi->blockinfo + pnum;
264 		/*
265 		 * We allow the scan to fail here. The loader will notice
266 		 * and look for a replacement.
267 		 */
268 		ubi_scan_vid_hdr(ubi, vh, pnum);
269 	}
270 	return 0;
271 }
272 
273 /*
274  * Fastmap code is stolen from the Linux kernel and this stub structure is used
275  * to make it happy.
276  */
277 struct ubi_attach_info {
278 	int i;
279 };
280 
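/*
 * Parse the fastmap data which ubi_scan_fastmap() read into @fm_buf:
 * validate the header and pool magics, walk the free/used/scrub/erase
 * EC lists and populate the per volume LEB to PEB tables from the EBA
 * records. Finally the two pools are scanned. Returns 0 on success or
 * UBI_BAD_FASTMAP if the fastmap is inconsistent.
 */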
281 static int ubi_attach_fastmap(struct ubi_scan_info *ubi,
282 			      struct ubi_attach_info *ai,
283 			      struct ubi_fastmap_layout *fm)
284 {
285 	struct ubi_fm_hdr *fmhdr;
286 	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
287 	struct ubi_fm_ec *fmec;
288 	struct ubi_fm_volhdr *fmvhdr;
289 	struct ubi_fm_eba *fm_eba;
290 	int ret, i, j, pool_size, wl_pool_size;
291 	size_t fm_pos = 0, fm_size = ubi->fm_size;
292 	void *fm_raw = ubi->fm_buf;
293 
294 	memset(ubi->fm_used, 0, sizeof(ubi->fm_used));
295 
296 	fm_pos += sizeof(struct ubi_fm_sb);
297 	if (fm_pos >= fm_size)
298 		goto fail_bad;
299 
300 	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
301 	fm_pos += sizeof(*fmhdr);
302 	if (fm_pos >= fm_size)
303 		goto fail_bad;
304 
305 	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
306 		ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
307 			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
308 		goto fail_bad;
309 	}
310 
311 	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
312 	fm_pos += sizeof(*fmpl1);
313 	if (fm_pos >= fm_size)
314 		goto fail_bad;
315 	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
316 		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
317 			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
318 		goto fail_bad;
319 	}
320 
321 	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
322 	fm_pos += sizeof(*fmpl2);
323 	if (fm_pos >= fm_size)
324 		goto fail_bad;
325 	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
326 		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
327 			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
328 		goto fail_bad;
329 	}
330 
331 	pool_size = be16_to_cpu(fmpl1->size);
332 	wl_pool_size = be16_to_cpu(fmpl2->size);
333 	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
334 	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
335 
336 	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
337 		ubi_err("bad pool size: %i", pool_size);
338 		goto fail_bad;
339 	}
340 
341 	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
342 		ubi_err("bad WL pool size: %i", wl_pool_size);
343 		goto fail_bad;
344 	}
345 
346 	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
347 	    fm->max_pool_size < 0) {
348 		ubi_err("bad maximal pool size: %i", fm->max_pool_size);
349 		goto fail_bad;
350 	}
351 
352 	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
353 	    fm->max_wl_pool_size < 0) {
354 		ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
355 		goto fail_bad;
356 	}
357 
358 	/* read EC values from free list */
359 	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
360 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
361 		fm_pos += sizeof(*fmec);
362 		if (fm_pos >= fm_size)
363 			goto fail_bad;
364 	}
365 
366 	/* read EC values from used list */
367 	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
368 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
369 		fm_pos += sizeof(*fmec);
370 		if (fm_pos >= fm_size)
371 			goto fail_bad;
372 
373 		generic_set_bit(be32_to_cpu(fmec->pnum), ubi->fm_used);
374 	}
375 
376 	/* read EC values from scrub list */
377 	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
378 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
379 		fm_pos += sizeof(*fmec);
380 		if (fm_pos >= fm_size)
381 			goto fail_bad;
382 	}
383 
384 	/* read EC values from erase list */
385 	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
386 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
387 		fm_pos += sizeof(*fmec);
388 		if (fm_pos >= fm_size)
389 			goto fail_bad;
390 	}
391 
392 	/* Iterate over all volumes and read their EBA table */
393 	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
394 		u32 vol_id, vol_type, used, reserved;
395 
396 		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
397 		fm_pos += sizeof(*fmvhdr);
398 		if (fm_pos >= fm_size)
399 			goto fail_bad;
400 
401 		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
402 			ubi_err("bad fastmap vol header magic: 0x%x, " \
403 				"expected: 0x%x",
404 				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
405 			goto fail_bad;
406 		}
407 
408 		vol_id = be32_to_cpu(fmvhdr->vol_id);
409 		vol_type = fmvhdr->vol_type;
410 		used = be32_to_cpu(fmvhdr->used_ebs);
411 
412 		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
413 		fm_pos += sizeof(*fm_eba);
414 		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
415 		if (fm_pos >= fm_size)
416 			goto fail_bad;
417 
418 		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
419 			ubi_err("bad fastmap EBA header magic: 0x%x, " \
420 				"expected: 0x%x",
421 				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
422 			goto fail_bad;
423 		}
424 
425 		reserved = be32_to_cpu(fm_eba->reserved_pebs);
426 		ubi_dbg("FA: vol %u used %u res: %u", vol_id, used, reserved);
427 		for (j = 0; j < reserved; j++) {
428 			int pnum = be32_to_cpu(fm_eba->pnum[j]);
429 
430 			if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
431 				continue;
432 
433 			if (!__test_and_clear_bit(pnum, ubi->fm_used))
434 				continue;
435 
436 			/*
437 			 * We only handle static volumes so used_ebs
438 			 * needs to be handed in. And we do not assign
439 			 * the reserved blocks
440 			 */
441 			if (j >= used)
442 				continue;
443 
444 			ret = assign_aeb_to_av(ubi, pnum, j, vol_id,
445 					       vol_type, used);
446 			if (!ret)
447 				continue;
448 
449 			/*
450 			 * Nasty: The fastmap claims that the volume
451 			 * has one block more than it actually has, but that block
452 			 * is always empty and the other blocks have
453 			 * the correct number of total LEBs in the
454 			 * headers. Deal with it.
455 			 */
456 			if (ret != UBI_IO_FF && j != used - 1)
457 				goto fail_bad;
458 			ubi_dbg("FA: Vol: %u Ignoring empty LEB %d of %d",
459 				vol_id, j, used);
460 		}
461 	}
462 
463 	ret = scan_pool(ubi, fmpl1->pebs, pool_size);
464 	if (ret)
465 		goto fail;
466 
467 	ret = scan_pool(ubi, fmpl2->pebs, wl_pool_size);
468 	if (ret)
469 		goto fail;
470 
471 #ifdef CHECKME
472 	/*
473 	 * If fastmap is leaking PEBs (must not happen), raise a
474 	 * fat warning and fall back to scanning mode.
475 	 * We do this here because in ubi_wl_init() it's too late
476 	 * and we cannot fall back to scanning.
477 	 */
478 	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
479 		    ai->bad_peb_count - fm->used_blocks))
480 		goto fail_bad;
481 #endif
482 
483 	return 0;
484 
485 fail_bad:
486 	ret = UBI_BAD_FASTMAP;
487 fail:
488 	return ret;
489 }
490 
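/*
 * Attach via fastmap: read the fastmap superblock from the anchor PEB
 * @fm_anchor, read all fastmap data blocks into @fm_buf, verify the
 * overall data CRC and hand the buffer to ubi_attach_fastmap(). Any
 * failure makes the caller fall back to a full scan.
 */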
491 static int ubi_scan_fastmap(struct ubi_scan_info *ubi,
492 			    struct ubi_attach_info *ai,
493 			    int fm_anchor)
494 {
495 	struct ubi_fm_sb *fmsb, *fmsb2;
496 	struct ubi_vid_hdr *vh;
497 	struct ubi_fastmap_layout *fm;
498 	int i, used_blocks, pnum, ret = 0;
499 	size_t fm_size;
500 	__be32 crc, tmp_crc;
501 	unsigned long long sqnum = 0;
502 
503 	fmsb = &ubi->fm_sb;
504 	fm = &ubi->fm_layout;
505 
506 	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
507 	if (ret && ret != UBI_IO_BITFLIPS)
508 		goto free_fm_sb;
509 	else if (ret == UBI_IO_BITFLIPS)
510 		fm->to_be_tortured[0] = 1;
511 
512 	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
513 		ubi_err("bad super block magic: 0x%x, expected: 0x%x",
514 			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
515 		ret = UBI_BAD_FASTMAP;
516 		goto free_fm_sb;
517 	}
518 
519 	if (fmsb->version != UBI_FM_FMT_VERSION) {
520 		ubi_err("bad fastmap version: %i, expected: %i",
521 			fmsb->version, UBI_FM_FMT_VERSION);
522 		ret = UBI_BAD_FASTMAP;
523 		goto free_fm_sb;
524 	}
525 
526 	used_blocks = be32_to_cpu(fmsb->used_blocks);
527 	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
528 		ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
529 		ret = UBI_BAD_FASTMAP;
530 		goto free_fm_sb;
531 	}
532 
533 	fm_size = ubi->leb_size * used_blocks;
534 	if (fm_size != ubi->fm_size) {
535 		ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
536 			ubi->fm_size);
537 		ret = UBI_BAD_FASTMAP;
538 		goto free_fm_sb;
539 	}
540 
541 	vh = &ubi->fm_vh;
542 
543 	for (i = 0; i < used_blocks; i++) {
544 		pnum = be32_to_cpu(fmsb->block_loc[i]);
545 
546 		if (ubi_io_is_bad(ubi, pnum)) {
547 			ret = UBI_BAD_FASTMAP;
548 			goto free_hdr;
549 		}
550 
551 #ifdef LATER
552 		int image_seq;
553 		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
554 		if (ret && ret != UBI_IO_BITFLIPS) {
555 			ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
556 				i, pnum);
557 			if (ret > 0)
558 				ret = UBI_BAD_FASTMAP;
559 			goto free_hdr;
560 		} else if (ret == UBI_IO_BITFLIPS)
561 			fm->to_be_tortured[i] = 1;
562 
563 		image_seq = be32_to_cpu(ech->image_seq);
564 		if (!ubi->image_seq)
565 			ubi->image_seq = image_seq;
566 		/*
567 		 * Older UBI implementations have image_seq set to zero, so
568 		 * we shouldn't fail if image_seq == 0.
569 		 */
570 		if (image_seq && (image_seq != ubi->image_seq)) {
571 			ubi_err("wrong image seq:%d instead of %d",
572 				be32_to_cpu(ech->image_seq), ubi->image_seq);
573 			ret = UBI_BAD_FASTMAP;
574 			goto free_hdr;
575 		}
576 #endif
577 		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
578 		if (ret && ret != UBI_IO_BITFLIPS) {
579 			ubi_err("unable to read fastmap block# %i (PEB: %i)",
580 				i, pnum);
581 			goto free_hdr;
582 		}
583 
584 		/*
585 		 * Mainline code rescans the anchor header. We've done
586 		 * that already so we merely copy it over.
587 		 */
588 		if (pnum == fm_anchor)
589 			memcpy(vh, ubi->blockinfo + pnum, sizeof(*fm));
590 
591 		if (i == 0) {
592 			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
593 				ubi_err("bad fastmap anchor vol_id: 0x%x," \
594 					" expected: 0x%x",
595 					be32_to_cpu(vh->vol_id),
596 					UBI_FM_SB_VOLUME_ID);
597 				ret = UBI_BAD_FASTMAP;
598 				goto free_hdr;
599 			}
600 		} else {
601 			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
602 				ubi_err("bad fastmap data vol_id: 0x%x," \
603 					" expected: 0x%x",
604 					be32_to_cpu(vh->vol_id),
605 					UBI_FM_DATA_VOLUME_ID);
606 				ret = UBI_BAD_FASTMAP;
607 				goto free_hdr;
608 			}
609 		}
610 
611 		if (sqnum < be64_to_cpu(vh->sqnum))
612 			sqnum = be64_to_cpu(vh->sqnum);
613 
614 		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
615 				  ubi->leb_start, ubi->leb_size);
616 		if (ret && ret != UBI_IO_BITFLIPS) {
617 			ubi_err("unable to read fastmap block# %i (PEB: %i, " \
618 				"err: %i)", i, pnum, ret);
619 			goto free_hdr;
620 		}
621 	}
622 
623 	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
624 	tmp_crc = be32_to_cpu(fmsb2->data_crc);
625 	fmsb2->data_crc = 0;
626 	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
627 	if (crc != tmp_crc) {
628 		ubi_err("fastmap data CRC is invalid");
629 		ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
630 		ret = UBI_BAD_FASTMAP;
631 		goto free_hdr;
632 	}
633 
634 	fmsb2->sqnum = sqnum;
635 
636 	fm->used_blocks = used_blocks;
637 
638 	ret = ubi_attach_fastmap(ubi, ai, fm);
639 	if (ret) {
640 		if (ret > 0)
641 			ret = UBI_BAD_FASTMAP;
642 		goto free_hdr;
643 	}
644 
645 	ubi->fm = fm;
646 	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
647 	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
648 	ubi_msg("attached by fastmap %uMB %u blocks",
649 		ubi->fsize_mb, ubi->peb_count);
650 	ubi_dbg("fastmap pool size: %d", ubi->fm_pool.max_size);
651 	ubi_dbg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
652 
653 out:
654 	if (ret)
655 		ubi_err("Attach by fastmap failed, doing a full scan!");
656 	return ret;
657 
658 free_hdr:
659 free_fm_sb:
660 	goto out;
661 }
662 
663 /*
664  * Scan the flash and attempt to attach via fastmap
665  */
666 static void ipl_scan(struct ubi_scan_info *ubi)
667 {
668 	unsigned int pnum;
669 	int res;
670 
671 	/*
672 	 * Scan first for the fastmap super block
673 	 */
674 	for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
675 		res = ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum);
676 		/*
677 		 * We ignore errors here as we are merely scanning
678 		 * the headers.
679 		 */
680 		if (res != UBI_FASTMAP_ANCHOR)
681 			continue;
682 
683 		/*
684 		 * If fastmap is disabled, continue scanning. This
685 		 * might happen because the previous attempt failed or
686 		 * the caller disabled it right away.
687 		 */
688 		if (!ubi->fm_enabled)
689 			continue;
690 
691 		/*
692 		 * Try to attach the fastmap, if that fails continue
693 		 * scanning.
694 		 */
695 		if (!ubi_scan_fastmap(ubi, NULL, pnum))
696 			return;
697 		/*
698 		 * Fastmap failed. Clear everything we have and start
699 		 * over. We are paranoid and do not trust anything.
700 		 */
701 		memset(ubi->volinfo, 0, sizeof(ubi->volinfo));
702 		pnum = 0;
703 		break;
704 	}
705 
706 	/*
707 	 * Continue scanning and ignore errors; we might find what we are
708 	 * looking for.
709 	 */
710 	for (; pnum < ubi->peb_count; pnum++)
711 		ubi_scan_vid_hdr(ubi, ubi->blockinfo + pnum, pnum);
712 }
713 
714 /*
715  * Load a logical block of a volume into memory
716  */
717 static int ubi_load_block(struct ubi_scan_info *ubi, uint8_t *laddr,
718 			  struct ubi_vol_info *vi, u32 vol_id, u32 lnum,
719 			  u32 last)
720 {
721 	struct ubi_vid_hdr *vh, *vrepl;
722 	u32 pnum, crc, dlen;
723 
724 retry:
725 	/*
726 	 * If this is a fastmap run, we try to do a full rescan,
727 	 * otherwise we simply give up.
728 	 */
729 	if (!test_bit(lnum, vi->found)) {
730 		ubi_warn("LEB %d of %d is missing", lnum, last);
731 		return -EINVAL;
732 	}
733 
734 	pnum = vi->lebs_to_pebs[lnum];
735 
736 	ubi_dbg("Load vol %u LEB %u PEB %u", vol_id, lnum, pnum);
737 
738 	if (ubi_io_is_bad(ubi, pnum)) {
739 		ubi_warn("Corrupted mapping block %d PB %d\n", lnum, pnum);
740 		return -EINVAL;
741 	}
742 
743 	if (test_bit(pnum, ubi->corrupt))
744 		goto find_other;
745 
746 	/*
747 	 * Lets try to read that block
748 	 */
749 	vh = ubi->blockinfo + pnum;
750 
751 	if (!test_bit(pnum, ubi->scanned)) {
752 		ubi_warn("Vol: %u LEB %u PEB %u not yet scanned", vol_id,
753 			 lnum, pnum);
754 		if (ubi_rescan_fm_vid_hdr(ubi, vh, pnum, vol_id, lnum))
755 			goto find_other;
756 	}
757 
758 	/*
759 	 * Check, if the total number of blocks is correct
760 	 */
761 	if (be32_to_cpu(vh->used_ebs) != last) {
762 		ubi_dbg("Block count missmatch.");
763 		ubi_dbg("vh->used_ebs: %d nrblocks: %d",
764 			be32_to_cpu(vh->used_ebs), last);
765 		generic_set_bit(pnum, ubi->corrupt);
766 		goto find_other;
767 	}
768 
769 	/*
770 	 * Get the data length of this block.
771 	 */
772 	dlen = be32_to_cpu(vh->data_size);
773 
774 	/*
775 	 * Read the data into RAM. We ignore the return value
776 	 * here as the only thing which might go wrong are
777 	 * bitflips. Try nevertheless.
778 	 */
779 	ubi_io_read(ubi, laddr, pnum, ubi->leb_start, dlen);
780 
781 	/* Calculate CRC over the data */
782 	crc = crc32(UBI_CRC32_INIT, laddr, dlen);
783 
784 	if (crc != be32_to_cpu(vh->data_crc)) {
785 		ubi_warn("Vol: %u LEB %u PEB %u data CRC failure", vol_id,
786 			 lnum, pnum);
787 		generic_set_bit(pnum, ubi->corrupt);
788 		goto find_other;
789 	}
790 
791 	/* We are good. Return the data length we read */
792 	return dlen;
793 
794 find_other:
795 	ubi_dbg("Find replacement for LEB %u PEB %u", lnum, pnum);
796 	generic_clear_bit(lnum, vi->found);
797 	vrepl = NULL;
798 
799 	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
800 		struct ubi_vid_hdr *tmp = ubi->blockinfo + pnum;
801 		u32 t_vol_id = be32_to_cpu(tmp->vol_id);
802 		u32 t_lnum = be32_to_cpu(tmp->lnum);
803 
804 		if (test_bit(pnum, ubi->corrupt))
805 			continue;
806 
807 		if (t_vol_id != vol_id || t_lnum != lnum)
808 			continue;
809 
810 		if (!test_bit(pnum, ubi->scanned)) {
811 			ubi_warn("Vol: %u LEB %u PEB %u not yet scanned",
812 				 vol_id, lnum, pnum);
813 			if (ubi_rescan_fm_vid_hdr(ubi, tmp, pnum, vol_id, lnum))
814 				continue;
815 		}
816 
817 		/*
818 		 * We found one. If it's the first, assign it, otherwise
819 		 * compare the sqnum.
820 		 */
821 		generic_set_bit(lnum, vi->found);
822 
823 		if (!vrepl) {
824 			vrepl = tmp;
825 			continue;
826 		}
827 
828 		if (be64_to_cpu(vrepl->sqnum) < be64_to_cpu(tmp->sqnum))
829 			vrepl = tmp;
830 	}
831 
832 	if (vrepl) {
833 		/* Update the vi table */
834 		pnum = vrepl - ubi->blockinfo;
835 		vi->lebs_to_pebs[lnum] = pnum;
836 		ubi_dbg("Trying PEB %u for LEB %u", pnum, lnum);
837 		vh = vrepl;
838 	}
839 	goto retry;
840 }
841 
842 /*
843  * Load a volume into RAM
844  */
845 static int ipl_load(struct ubi_scan_info *ubi, const u32 vol_id, uint8_t *laddr)
846 {
847 	struct ubi_vol_info *vi;
848 	u32 lnum, last, len;
849 
850 	if (vol_id >= UBI_SPL_VOL_IDS)
851 		return -EINVAL;
852 
853 	len = 0;
854 	vi = ubi->volinfo + vol_id;
855 	last = vi->last_block + 1;
856 
857 	/* Read the blocks to RAM, check CRC */
858 	for (lnum = 0 ; lnum < last; lnum++) {
859 		int res = ubi_load_block(ubi, laddr, vi, vol_id, lnum, last);
860 
861 		if (res < 0) {
862 			ubi_warn("Failed to load volume %u", vol_id);
863 			return res;
864 		}
865 		/* res is the data length of the read block */
866 		laddr += res;
867 		len += res;
868 	}
869 	return len;
870 }
871 
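/**
 * ubispl_load_volumes - scan the UBI device and load the requested volumes
 * @info:	flash geometry and read callback, filled in by the caller
 * @lvols:	array describing the volumes to load and their load addresses
 * @nrvols:	number of entries in @lvols
 *
 * Attaches via fastmap when enabled, otherwise by a full scan of all
 * blocks. If loading a volume fails after a fastmap attach, the whole
 * operation is retried once with fastmap disabled. Returns 0 on success
 * or a negative error code.
 */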
872 int ubispl_load_volumes(struct ubispl_info *info, struct ubispl_load *lvols,
873 			int nrvols)
874 {
875 	struct ubi_scan_info *ubi = info->ubi;
876 	int res, i, fastmap = info->fastmap;
877 	u32 fsize;
878 
879 retry:
880 	/*
881 	 * We do a partial initialization of @ubi. Cleaning fm_buf is
882 	 * not necessary.
883 	 */
884 	memset(ubi, 0, offsetof(struct ubi_scan_info, fm_buf));
885 
886 	ubi->read = info->read;
887 
888 	/* Precalculate the offsets */
889 	ubi->vid_offset = info->vid_offset;
890 	ubi->leb_start = info->leb_start;
891 	ubi->leb_size = info->peb_size - ubi->leb_start;
892 	ubi->peb_count = info->peb_count;
893 	ubi->peb_offset = info->peb_offset;
894 
895 	fsize = info->peb_size * info->peb_count;
896 	ubi->fsize_mb = fsize >> 20;
897 
898 	/* Fastmap init */
899 	ubi->fm_size = ubi_calc_fm_size(ubi);
900 	ubi->fm_enabled = fastmap;
901 
902 	for (i = 0; i < nrvols; i++) {
903 		struct ubispl_load *lv = lvols + i;
904 
905 		generic_set_bit(lv->vol_id, ubi->toload);
906 	}
907 
908 	ipl_scan(ubi);
909 
910 	for (i = 0; i < nrvols; i++) {
911 		struct ubispl_load *lv = lvols + i;
912 
913 		ubi_msg("Loading VolId #%d", lv->vol_id);
914 		res = ipl_load(ubi, lv->vol_id, lv->load_addr);
915 		if (res < 0) {
916 			if (fastmap) {
917 				fastmap = 0;
918 				goto retry;
919 			}
920 			ubi_warn("Failed");
921 			return res;
922 		}
923 	}
924 	return 0;
925 }
926
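/*
 * Usage sketch (illustration only, not part of this driver): a caller in
 * SPL board code could wire things up roughly as below. The flash_read()
 * helper, the board_nand_read() call, the CONFIG_* values and the load
 * address are assumptions made for this example; the struct members are
 * the ones consumed by ubispl_load_volumes() above.
 *
 *	static int flash_read(int pnum, int offset, int len, void *dst)
 *	{
 *		// Board specific: read len bytes at offset inside physical
 *		// eraseblock pnum, e.g. via the SPL NAND driver.
 *		return board_nand_read(pnum, offset, len, dst);
 *	}
 *
 *	static struct ubi_scan_info scan_ubi;	// large, keep it in DRAM
 *
 *	int load_kernel_from_ubi(void)
 *	{
 *		struct ubispl_info info = {
 *			.ubi		= &scan_ubi,
 *			.read		= flash_read,
 *			.peb_size	= CONFIG_SPL_UBI_PEB_SIZE,
 *			.peb_count	= CONFIG_SPL_UBI_MAX_PEBS,
 *			.peb_offset	= CONFIG_SPL_UBI_PEB_OFFSET,
 *			.vid_offset	= CONFIG_SPL_UBI_VID_OFFSET,
 *			.leb_start	= CONFIG_SPL_UBI_LEB_START,
 *			.fastmap	= 1,
 *		};
 *		struct ubispl_load vols[] = {
 *			{ .vol_id = 0, .load_addr = (void *)0x82000000 },
 *		};
 *
 *		return ubispl_load_volumes(&info, vols, ARRAY_SIZE(vols));
 *	}
 */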