xref: /openbmc/linux/drivers/mtd/ubi/fastmap.c (revision d6fc9fcb)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2012 Linutronix GmbH
4  * Copyright (c) 2014 sigma star gmbh
5  * Author: Richard Weinberger <richard@nod.at>
6  */
7 
8 #include <linux/crc32.h>
9 #include <linux/bitmap.h>
10 #include "ubi.h"
11 
12 /**
13  * init_seen - allocate the bitmap used for fastmap debug checks.
14  * @ubi: UBI device description object
15  */
16 static inline unsigned long *init_seen(struct ubi_device *ubi)
17 {
18 	unsigned long *ret;
19 
20 	if (!ubi_dbg_chk_fastmap(ubi))
21 		return NULL;
22 
23 	ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
24 		      GFP_KERNEL);
25 	if (!ret)
26 		return ERR_PTR(-ENOMEM);
27 
28 	return ret;
29 }
30 
31 /**
32  * free_seen - free the seen logic bitmap.
33  * @seen: bitmap with @ubi->peb_count bits
34  */
35 static inline void free_seen(unsigned long *seen)
36 {
37 	kfree(seen);
38 }
39 
40 /**
41  * set_seen - mark a PEB as seen.
42  * @ubi: UBI device description object
43  * @pnum: The PEB to be marked as seen
44  * @seen: bitmap with @ubi->peb_count bits
45  */
46 static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
47 {
48 	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
49 		return;
50 
51 	set_bit(pnum, seen);
52 }
53 
54 /**
55  * self_check_seen - check whether all PEBs have been seen by fastmap.
56  * @ubi: UBI device description object
57  * @seen: bitmap with @ubi->peb_count bits
58  */
59 static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
60 {
61 	int pnum, ret = 0;
62 
63 	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
64 		return 0;
65 
66 	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
67 		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
68 			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
69 			ret = -EINVAL;
70 		}
71 	}
72 
73 	return ret;
74 }
75 
76 /**
77  * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
78  * @ubi: UBI device description object
79  */
80 size_t ubi_calc_fm_size(struct ubi_device *ubi)
81 {
82 	size_t size;
83 
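	/*
	 * Worst-case on-flash layout: super block, fastmap header, the two
	 * scan pools (normal and WL), one ubi_fm_ec entry per PEB, one EBA
	 * header plus one __be32 PEB reference per PEB, and a volume header
	 * for every possible volume.  The sum is rounded up to whole LEBs
	 * because a fastmap always occupies complete LEBs.
	 */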
84 	size = sizeof(struct ubi_fm_sb) +
85 		sizeof(struct ubi_fm_hdr) +
86 		sizeof(struct ubi_fm_scan_pool) +
87 		sizeof(struct ubi_fm_scan_pool) +
88 		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
89 		(sizeof(struct ubi_fm_eba) +
90 		(ubi->peb_count * sizeof(__be32))) +
91 		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
92 	return roundup(size, ubi->leb_size);
93 }
94 
95 
96 /**
97  * new_fm_vbuf - allocate a new VID buffer for fastmap usage.
98  * @ubi: UBI device description object
99  * @vol_id: the VID of the new header
100  *
101  * Returns a new struct ubi_vid_io_buf on success.
102  * NULL indicates out of memory.
103  */
104 static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
105 {
106 	struct ubi_vid_io_buf *new;
107 	struct ubi_vid_hdr *vh;
108 
109 	new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
110 	if (!new)
111 		goto out;
112 
113 	vh = ubi_get_vid_hdr(new);
114 	vh->vol_type = UBI_VID_DYNAMIC;
115 	vh->vol_id = cpu_to_be32(vol_id);
116 
117 	/* UBI implementations without fastmap support have to delete the
118 	 * fastmap.
119 	 */
120 	vh->compat = UBI_COMPAT_DELETE;
121 
122 out:
123 	return new;
124 }
125 
126 /**
127  * add_aeb - create and add an attach erase block to a given list.
128  * @ai: UBI attach info object
129  * @list: the target list
130  * @pnum: PEB number of the new attach erase block
131  * @ec: erase counter of the new PEB
132  * @scrub: scrub this PEB after attaching
133  *
134  * Returns 0 on success, < 0 indicates an internal error.
135  */
136 static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
137 		   int pnum, int ec, int scrub)
138 {
139 	struct ubi_ainf_peb *aeb;
140 
141 	aeb = ubi_alloc_aeb(ai, pnum, ec);
142 	if (!aeb)
143 		return -ENOMEM;
144 
145 	aeb->lnum = -1;
146 	aeb->scrub = scrub;
147 	aeb->copy_flag = aeb->sqnum = 0;
148 
149 	ai->ec_sum += aeb->ec;
150 	ai->ec_count++;
151 
152 	if (ai->max_ec < aeb->ec)
153 		ai->max_ec = aeb->ec;
154 
155 	if (ai->min_ec > aeb->ec)
156 		ai->min_ec = aeb->ec;
157 
158 	list_add_tail(&aeb->u.list, list);
159 
160 	return 0;
161 }
162 
163 /**
164  * add_vol - create and add a new volume to ubi_attach_info.
165  * @ai: ubi_attach_info object
166  * @vol_id: VID of the new volume
167  * @used_ebs: number of used EBs
168  * @data_pad: data padding value of the new volume
169  * @vol_type: volume type
170  * @last_eb_bytes: number of bytes in the last LEB
171  *
172  * Returns the new struct ubi_ainf_volume on success.
173  * NULL indicates an error.
174  */
175 static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
176 				       int used_ebs, int data_pad, u8 vol_type,
177 				       int last_eb_bytes)
178 {
179 	struct ubi_ainf_volume *av;
180 
181 	av = ubi_add_av(ai, vol_id);
182 	if (IS_ERR(av))
183 		return av;
184 
185 	av->data_pad = data_pad;
186 	av->last_data_size = last_eb_bytes;
187 	av->compat = 0;
188 	av->vol_type = vol_type;
189 	if (av->vol_type == UBI_STATIC_VOLUME)
190 		av->used_ebs = used_ebs;
191 
192 	dbg_bld("found volume (ID %i)", vol_id);
193 	return av;
194 }
195 
196 /**
197  * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
198  * from its original list.
199  * @ai: ubi_attach_info object
200  * @aeb: the SEB to be assigned
201  * @av: target scan volume
202  */
203 static void assign_aeb_to_av(struct ubi_attach_info *ai,
204 			     struct ubi_ainf_peb *aeb,
205 			     struct ubi_ainf_volume *av)
206 {
207 	struct ubi_ainf_peb *tmp_aeb;
208 	struct rb_node **p = &av->root.rb_node, *parent = NULL;
209 
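	/*
	 * Find the insertion point in the per-volume RB-tree, which is
	 * ordered by LEB number.
	 */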
210 	while (*p) {
211 		parent = *p;
212 
213 		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
214 		if (aeb->lnum != tmp_aeb->lnum) {
215 			if (aeb->lnum < tmp_aeb->lnum)
216 				p = &(*p)->rb_left;
217 			else
218 				p = &(*p)->rb_right;
219 
220 			continue;
221 		} else
222 			break;
223 	}
224 
225 	list_del(&aeb->u.list);
226 	av->leb_count++;
227 
228 	rb_link_node(&aeb->u.rb, parent, p);
229 	rb_insert_color(&aeb->u.rb, &av->root);
230 }
231 
232 /**
233  * update_vol - inserts or updates a LEB which was found in a pool.
234  * @ubi: the UBI device object
235  * @ai: attach info object
236  * @av: the volume this LEB belongs to
237  * @new_vh: the volume header derived from new_aeb
238  * @new_aeb: the AEB to be examined
239  *
240  * Returns 0 on success, < 0 indicates an internal error.
241  */
242 static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
243 		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
244 		      struct ubi_ainf_peb *new_aeb)
245 {
246 	struct rb_node **p = &av->root.rb_node, *parent = NULL;
247 	struct ubi_ainf_peb *aeb, *victim;
248 	int cmp_res;
249 
250 	while (*p) {
251 		parent = *p;
252 		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
253 
254 		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
255 			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
256 				p = &(*p)->rb_left;
257 			else
258 				p = &(*p)->rb_right;
259 
260 			continue;
261 		}
262 
263 		/* This case can happen if the fastmap gets written
264 		 * because of a volume change (creation, deletion, ..).
265 		 * Then a PEB can be within the persistent EBA and the pool.
266 		 */
267 		if (aeb->pnum == new_aeb->pnum) {
268 			ubi_assert(aeb->lnum == new_aeb->lnum);
269 			ubi_free_aeb(ai, new_aeb);
270 
271 			return 0;
272 		}
273 
274 		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
275 		if (cmp_res < 0)
276 			return cmp_res;
277 
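		/*
		 * Bit 0 of the ubi_compare_lebs() result is set when the PEB
		 * passed via @new_aeb holds the newer copy of this LEB.
		 */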
278 		/* new_aeb is newer */
279 		if (cmp_res & 1) {
280 			victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
281 			if (!victim)
282 				return -ENOMEM;
283 
284 			list_add_tail(&victim->u.list, &ai->erase);
285 
286 			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
287 				av->last_data_size =
288 					be32_to_cpu(new_vh->data_size);
289 
290 			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
291 				av->vol_id, aeb->lnum, new_aeb->pnum);
292 
293 			aeb->ec = new_aeb->ec;
294 			aeb->pnum = new_aeb->pnum;
295 			aeb->copy_flag = new_vh->copy_flag;
296 			aeb->scrub = new_aeb->scrub;
297 			aeb->sqnum = new_aeb->sqnum;
298 			ubi_free_aeb(ai, new_aeb);
299 
300 		/* new_aeb is older */
301 		} else {
302 			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
303 				av->vol_id, aeb->lnum, new_aeb->pnum);
304 			list_add_tail(&new_aeb->u.list, &ai->erase);
305 		}
306 
307 		return 0;
308 	}
309 	/* This LEB is new, let's add it to the volume */
310 
311 	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
312 		av->highest_lnum = be32_to_cpu(new_vh->lnum);
313 		av->last_data_size = be32_to_cpu(new_vh->data_size);
314 	}
315 
316 	if (av->vol_type == UBI_STATIC_VOLUME)
317 		av->used_ebs = be32_to_cpu(new_vh->used_ebs);
318 
319 	av->leb_count++;
320 
321 	rb_link_node(&new_aeb->u.rb, parent, p);
322 	rb_insert_color(&new_aeb->u.rb, &av->root);
323 
324 	return 0;
325 }
326 
327 /**
328  * process_pool_aeb - handle a non-empty PEB found in a pool.
329  * @ubi: UBI device object
330  * @ai: attach info object
331  * @new_vh: the volume header derived from new_aeb
332  * @new_aeb: the AEB to be examined
333  *
334  * Returns 0 on success, < 0 indicates an internal error.
335  */
336 static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
337 			    struct ubi_vid_hdr *new_vh,
338 			    struct ubi_ainf_peb *new_aeb)
339 {
340 	int vol_id = be32_to_cpu(new_vh->vol_id);
341 	struct ubi_ainf_volume *av;
342 
343 	if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
344 		ubi_free_aeb(ai, new_aeb);
345 
346 		return 0;
347 	}
348 
349 	/* Find the volume this SEB belongs to */
350 	av = ubi_find_av(ai, vol_id);
351 	if (!av) {
352 		ubi_err(ubi, "orphaned volume in fastmap pool!");
353 		ubi_free_aeb(ai, new_aeb);
354 		return UBI_BAD_FASTMAP;
355 	}
356 
357 	ubi_assert(vol_id == av->vol_id);
358 
359 	return update_vol(ubi, ai, av, new_vh, new_aeb);
360 }
361 
362 /**
363  * unmap_peb - unmap a PEB.
364  * If fastmap detects a free PEB in the pool it has to check whether
365  * this PEB has been unmapped after writing the fastmap.
366  *
367  * @ai: UBI attach info object
368  * @pnum: The PEB to be unmapped
369  */
370 static void unmap_peb(struct ubi_attach_info *ai, int pnum)
371 {
372 	struct ubi_ainf_volume *av;
373 	struct rb_node *node, *node2;
374 	struct ubi_ainf_peb *aeb;
375 
376 	ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
377 		ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
378 			if (aeb->pnum == pnum) {
379 				rb_erase(&aeb->u.rb, &av->root);
380 				av->leb_count--;
381 				ubi_free_aeb(ai, aeb);
382 				return;
383 			}
384 		}
385 	}
386 }
387 
388 /**
389  * scan_pool - scans a pool for changed (i.e. no longer empty) PEBs.
390  * @ubi: UBI device object
391  * @ai: attach info object
392  * @pebs: an array of all PEB numbers in the pool to be scanned
393  * @pool_size: size of the pool (number of entries in @pebs)
394  * @max_sqnum: pointer to the maximal sequence number
395  * @free: list of PEBs which are most likely free (and go into @ai->free)
396  *
397  * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
398  * < 0 indicates an internal error.
399  */
400 static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
401 		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
402 		     struct list_head *free)
403 {
404 	struct ubi_vid_io_buf *vb;
405 	struct ubi_vid_hdr *vh;
406 	struct ubi_ec_hdr *ech;
407 	struct ubi_ainf_peb *new_aeb;
408 	int i, pnum, err, ret = 0;
409 
410 	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
411 	if (!ech)
412 		return -ENOMEM;
413 
414 	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
415 	if (!vb) {
416 		kfree(ech);
417 		return -ENOMEM;
418 	}
419 
420 	vh = ubi_get_vid_hdr(vb);
421 
422 	dbg_bld("scanning fastmap pool: size = %i", pool_size);
423 
424 	/*
425 	 * Now scan all PEBs in the pool to find changes which have been made
426 	 * after the creation of the fastmap
427 	 */
428 	for (i = 0; i < pool_size; i++) {
429 		int scrub = 0;
430 		int image_seq;
431 
432 		pnum = be32_to_cpu(pebs[i]);
433 
434 		if (ubi_io_is_bad(ubi, pnum)) {
435 			ubi_err(ubi, "bad PEB in fastmap pool!");
436 			ret = UBI_BAD_FASTMAP;
437 			goto out;
438 		}
439 
440 		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
441 		if (err && err != UBI_IO_BITFLIPS) {
442 			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
443 				pnum, err);
444 			ret = err > 0 ? UBI_BAD_FASTMAP : err;
445 			goto out;
446 		} else if (err == UBI_IO_BITFLIPS)
447 			scrub = 1;
448 
449 		/*
450 		 * Older UBI implementations have image_seq set to zero, so
451 		 * we shouldn't fail if image_seq == 0.
452 		 */
453 		image_seq = be32_to_cpu(ech->image_seq);
454 
455 		if (image_seq && (image_seq != ubi->image_seq)) {
456 			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
457 				be32_to_cpu(ech->image_seq), ubi->image_seq);
458 			ret = UBI_BAD_FASTMAP;
459 			goto out;
460 		}
461 
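		/*
		 * An empty (all 0xFF) VID header means this pool PEB is still
		 * free or was unmapped after the fastmap had been written;
		 * a readable VID header means the PEB now carries LEB data
		 * which has to be merged into the attach info.
		 */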
462 		err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
463 		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
464 			unsigned long long ec = be64_to_cpu(ech->ec);
465 			unmap_peb(ai, pnum);
466 			dbg_bld("Adding PEB to free: %i", pnum);
467 
468 			if (err == UBI_IO_FF_BITFLIPS)
469 				scrub = 1;
470 
471 			add_aeb(ai, free, pnum, ec, scrub);
472 			continue;
473 		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
474 			dbg_bld("Found non empty PEB:%i in pool", pnum);
475 
476 			if (err == UBI_IO_BITFLIPS)
477 				scrub = 1;
478 
479 			new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
480 			if (!new_aeb) {
481 				ret = -ENOMEM;
482 				goto out;
483 			}
484 
485 			new_aeb->lnum = be32_to_cpu(vh->lnum);
486 			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
487 			new_aeb->copy_flag = vh->copy_flag;
488 			new_aeb->scrub = scrub;
489 
490 			if (*max_sqnum < new_aeb->sqnum)
491 				*max_sqnum = new_aeb->sqnum;
492 
493 			err = process_pool_aeb(ubi, ai, vh, new_aeb);
494 			if (err) {
495 				ret = err > 0 ? UBI_BAD_FASTMAP : err;
496 				goto out;
497 			}
498 		} else {
499 			/* We are paranoid and fall back to scanning mode */
500 			ubi_err(ubi, "fastmap pool PEBs contain damaged PEBs!");
501 			ret = err > 0 ? UBI_BAD_FASTMAP : err;
502 			goto out;
503 		}
504 
505 	}
506 
507 out:
508 	ubi_free_vid_buf(vb);
509 	kfree(ech);
510 	return ret;
511 }
512 
513 /**
514  * count_fastmap_pebs - Counts the PEBs found by fastmap.
515  * @ai: The UBI attach info object
516  */
517 static int count_fastmap_pebs(struct ubi_attach_info *ai)
518 {
519 	struct ubi_ainf_peb *aeb;
520 	struct ubi_ainf_volume *av;
521 	struct rb_node *rb1, *rb2;
522 	int n = 0;
523 
524 	list_for_each_entry(aeb, &ai->erase, u.list)
525 		n++;
526 
527 	list_for_each_entry(aeb, &ai->free, u.list)
528 		n++;
529 
530 	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
531 		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
532 			n++;
533 
534 	return n;
535 }
536 
537 /**
538  * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
539  * @ubi: UBI device object
540  * @ai: UBI attach info object
541  * @fm: the fastmap to be attached
542  *
543  * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
544  * < 0 indicates an internal error.
545  */
546 static int ubi_attach_fastmap(struct ubi_device *ubi,
547 			      struct ubi_attach_info *ai,
548 			      struct ubi_fastmap_layout *fm)
549 {
550 	struct list_head used, free;
551 	struct ubi_ainf_volume *av;
552 	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
553 	struct ubi_fm_sb *fmsb;
554 	struct ubi_fm_hdr *fmhdr;
555 	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
556 	struct ubi_fm_ec *fmec;
557 	struct ubi_fm_volhdr *fmvhdr;
558 	struct ubi_fm_eba *fm_eba;
559 	int ret, i, j, pool_size, wl_pool_size;
560 	size_t fm_pos = 0, fm_size = ubi->fm_size;
561 	unsigned long long max_sqnum = 0;
562 	void *fm_raw = ubi->fm_buf;
563 
564 	INIT_LIST_HEAD(&used);
565 	INIT_LIST_HEAD(&free);
566 	ai->min_ec = UBI_MAX_ERASECOUNTER;
567 
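	/*
	 * ubi->fm_buf is parsed in exactly the order in which
	 * ubi_write_fastmap() lays it out: super block, header, the two scan
	 * pools, the free/used/scrub/erase EC lists and finally one volume
	 * header plus EBA table per volume.  Every advance of fm_pos is
	 * bounds-checked against fm_size so a truncated fastmap is rejected
	 * instead of being read past its end.
	 */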
568 	fmsb = (struct ubi_fm_sb *)(fm_raw);
569 	ai->max_sqnum = fmsb->sqnum;
570 	fm_pos += sizeof(struct ubi_fm_sb);
571 	if (fm_pos >= fm_size)
572 		goto fail_bad;
573 
574 	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
575 	fm_pos += sizeof(*fmhdr);
576 	if (fm_pos >= fm_size)
577 		goto fail_bad;
578 
579 	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
580 		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
581 			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
582 		goto fail_bad;
583 	}
584 
585 	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
586 	fm_pos += sizeof(*fmpl);
587 	if (fm_pos >= fm_size)
588 		goto fail_bad;
589 	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
590 		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
591 			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
592 		goto fail_bad;
593 	}
594 
595 	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
596 	fm_pos += sizeof(*fmpl_wl);
597 	if (fm_pos >= fm_size)
598 		goto fail_bad;
599 	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
600 		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
601 			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
602 		goto fail_bad;
603 	}
604 
605 	pool_size = be16_to_cpu(fmpl->size);
606 	wl_pool_size = be16_to_cpu(fmpl_wl->size);
607 	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
608 	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
609 
610 	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
611 		ubi_err(ubi, "bad pool size: %i", pool_size);
612 		goto fail_bad;
613 	}
614 
615 	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
616 		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
617 		goto fail_bad;
618 	}
619 
620 
621 	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
622 	    fm->max_pool_size < 0) {
623 		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
624 		goto fail_bad;
625 	}
626 
627 	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
628 	    fm->max_wl_pool_size < 0) {
629 		ubi_err(ubi, "bad maximal WL pool size: %i",
630 			fm->max_wl_pool_size);
631 		goto fail_bad;
632 	}
633 
634 	/* read EC values from free list */
635 	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
636 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
637 		fm_pos += sizeof(*fmec);
638 		if (fm_pos >= fm_size)
639 			goto fail_bad;
640 
641 		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
642 			be32_to_cpu(fmec->ec), 0);
643 	}
644 
645 	/* read EC values from used list */
646 	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
647 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
648 		fm_pos += sizeof(*fmec);
649 		if (fm_pos >= fm_size)
650 			goto fail_bad;
651 
652 		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
653 			be32_to_cpu(fmec->ec), 0);
654 	}
655 
656 	/* read EC values from scrub list */
657 	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
658 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
659 		fm_pos += sizeof(*fmec);
660 		if (fm_pos >= fm_size)
661 			goto fail_bad;
662 
663 		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
664 			be32_to_cpu(fmec->ec), 1);
665 	}
666 
667 	/* read EC values from erase list */
668 	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
669 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
670 		fm_pos += sizeof(*fmec);
671 		if (fm_pos >= fm_size)
672 			goto fail_bad;
673 
674 		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
675 			be32_to_cpu(fmec->ec), 1);
676 	}
677 
678 	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
679 	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
680 
681 	/* Iterate over all volumes and read their EBA table */
682 	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
683 		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
684 		fm_pos += sizeof(*fmvhdr);
685 		if (fm_pos >= fm_size)
686 			goto fail_bad;
687 
688 		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
689 			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
690 				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
691 			goto fail_bad;
692 		}
693 
694 		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
695 			     be32_to_cpu(fmvhdr->used_ebs),
696 			     be32_to_cpu(fmvhdr->data_pad),
697 			     fmvhdr->vol_type,
698 			     be32_to_cpu(fmvhdr->last_eb_bytes));
699 
700 		if (IS_ERR(av)) {
701 			if (PTR_ERR(av) == -EEXIST)
702 				ubi_err(ubi, "volume (ID %i) already exists",
703 					fmvhdr->vol_id);
704 
705 			goto fail_bad;
706 		}
707 
708 		ai->vols_found++;
709 		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
710 			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
711 
712 		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
713 		fm_pos += sizeof(*fm_eba);
714 		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
715 		if (fm_pos >= fm_size)
716 			goto fail_bad;
717 
718 		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
719 			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
720 				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
721 			goto fail_bad;
722 		}
723 
724 		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
725 			int pnum = be32_to_cpu(fm_eba->pnum[j]);
726 
727 			if (pnum < 0)
728 				continue;
729 
730 			aeb = NULL;
731 			list_for_each_entry(tmp_aeb, &used, u.list) {
732 				if (tmp_aeb->pnum == pnum) {
733 					aeb = tmp_aeb;
734 					break;
735 				}
736 			}
737 
738 			if (!aeb) {
739 				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
740 				goto fail_bad;
741 			}
742 
743 			aeb->lnum = j;
744 
745 			if (av->highest_lnum <= aeb->lnum)
746 				av->highest_lnum = aeb->lnum;
747 
748 			assign_aeb_to_av(ai, aeb, av);
749 
750 			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
751 				aeb->pnum, aeb->lnum, av->vol_id);
752 		}
753 	}
754 
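	/*
	 * Scan the pools last: a PEB found in a pool may hold a newer copy
	 * of a LEB than the EBA tables above, in which case update_vol()
	 * replaces the stale entry and schedules the old PEB for erasure.
	 */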
755 	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
756 	if (ret)
757 		goto fail;
758 
759 	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
760 	if (ret)
761 		goto fail;
762 
763 	if (max_sqnum > ai->max_sqnum)
764 		ai->max_sqnum = max_sqnum;
765 
766 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
767 		list_move_tail(&tmp_aeb->u.list, &ai->free);
768 
769 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
770 		list_move_tail(&tmp_aeb->u.list, &ai->erase);
771 
772 	ubi_assert(list_empty(&free));
773 
774 	/*
775 	 * If fastmap is leaking PEBs (must not happen), raise a
776 	 * fat warning and fall back to scanning mode.
777 	 * We do this here because in ubi_wl_init() it's too late
778 	 * and we cannot fall back to scanning.
779 	 */
780 	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
781 		    ai->bad_peb_count - fm->used_blocks))
782 		goto fail_bad;
783 
784 	return 0;
785 
786 fail_bad:
787 	ret = UBI_BAD_FASTMAP;
788 fail:
789 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
790 		list_del(&tmp_aeb->u.list);
791 		ubi_free_aeb(ai, tmp_aeb);
792 	}
793 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
794 		list_del(&tmp_aeb->u.list);
795 		ubi_free_aeb(ai, tmp_aeb);
796 	}
797 
798 	return ret;
799 }
800 
801 /**
802  * find_fm_anchor - find the most recent Fastmap superblock (anchor)
803  * @ai: UBI attach info to be filled
804  */
805 static int find_fm_anchor(struct ubi_attach_info *ai)
806 {
807 	int ret = -1;
808 	struct ubi_ainf_peb *aeb;
809 	unsigned long long max_sqnum = 0;
810 
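	/*
	 * Several fastmap superblocks may survive on flash (e.g. after a
	 * power cut during a fastmap update); the one with the highest
	 * sequence number is the most recent and thus the valid anchor.
	 */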
811 	list_for_each_entry(aeb, &ai->fastmap, u.list) {
812 		if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
813 			max_sqnum = aeb->sqnum;
814 			ret = aeb->pnum;
815 		}
816 	}
817 
818 	return ret;
819 }
820 
821 static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
822 				      struct ubi_ainf_peb *old)
823 {
824 	struct ubi_ainf_peb *new;
825 
826 	new = ubi_alloc_aeb(ai, old->pnum, old->ec);
827 	if (!new)
828 		return NULL;
829 
830 	new->vol_id = old->vol_id;
831 	new->sqnum = old->sqnum;
832 	new->lnum = old->lnum;
833 	new->scrub = old->scrub;
834 	new->copy_flag = old->copy_flag;
835 
836 	return new;
837 }
838 
839 /**
840  * ubi_scan_fastmap - scan the fastmap.
841  * @ubi: UBI device object
842  * @ai: UBI attach info to be filled
843  * @scan_ai: UBI attach info from the first 64 PEBs,
844  *           used to find the most recent Fastmap data structure
845  *
846  * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
847  * UBI_BAD_FASTMAP if one was found but is not usable.
848  * < 0 indicates an internal error.
849  */
850 int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
851 		     struct ubi_attach_info *scan_ai)
852 {
853 	struct ubi_fm_sb *fmsb, *fmsb2;
854 	struct ubi_vid_io_buf *vb;
855 	struct ubi_vid_hdr *vh;
856 	struct ubi_ec_hdr *ech;
857 	struct ubi_fastmap_layout *fm;
858 	struct ubi_ainf_peb *aeb;
859 	int i, used_blocks, pnum, fm_anchor, ret = 0;
860 	size_t fm_size;
861 	__be32 crc, tmp_crc;
862 	unsigned long long sqnum = 0;
863 
864 	fm_anchor = find_fm_anchor(scan_ai);
865 	if (fm_anchor < 0)
866 		return UBI_NO_FASTMAP;
867 
868 	/* Copy all (possible) fastmap blocks into our new attach structure. */
869 	list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
870 		struct ubi_ainf_peb *new;
871 
872 		new = clone_aeb(ai, aeb);
873 		if (!new)
874 			return -ENOMEM;
875 
876 		list_add(&new->u.list, &ai->fastmap);
877 	}
878 
879 	down_write(&ubi->fm_protect);
880 	memset(ubi->fm_buf, 0, ubi->fm_size);
881 
882 	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
883 	if (!fmsb) {
884 		ret = -ENOMEM;
885 		goto out;
886 	}
887 
888 	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
889 	if (!fm) {
890 		ret = -ENOMEM;
891 		kfree(fmsb);
892 		goto out;
893 	}
894 
895 	ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
896 	if (ret && ret != UBI_IO_BITFLIPS)
897 		goto free_fm_sb;
898 	else if (ret == UBI_IO_BITFLIPS)
899 		fm->to_be_tortured[0] = 1;
900 
901 	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
902 		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
903 			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
904 		ret = UBI_BAD_FASTMAP;
905 		goto free_fm_sb;
906 	}
907 
908 	if (fmsb->version != UBI_FM_FMT_VERSION) {
909 		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
910 			fmsb->version, UBI_FM_FMT_VERSION);
911 		ret = UBI_BAD_FASTMAP;
912 		goto free_fm_sb;
913 	}
914 
915 	used_blocks = be32_to_cpu(fmsb->used_blocks);
916 	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
917 		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
918 			used_blocks);
919 		ret = UBI_BAD_FASTMAP;
920 		goto free_fm_sb;
921 	}
922 
923 	fm_size = ubi->leb_size * used_blocks;
924 	if (fm_size != ubi->fm_size) {
925 		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
926 			fm_size, ubi->fm_size);
927 		ret = UBI_BAD_FASTMAP;
928 		goto free_fm_sb;
929 	}
930 
931 	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
932 	if (!ech) {
933 		ret = -ENOMEM;
934 		goto free_fm_sb;
935 	}
936 
937 	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
938 	if (!vb) {
939 		ret = -ENOMEM;
940 		goto free_hdr;
941 	}
942 
943 	vh = ubi_get_vid_hdr(vb);
944 
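	/*
	 * Read all fastmap blocks into ubi->fm_buf.  Block 0 must be the
	 * anchor PEB found by the scan and must carry the fastmap superblock
	 * volume ID; all further blocks belong to the fastmap data volume.
	 * Bitflips only mark the affected PEB for torturing later, they do
	 * not abort the fastmap attach.
	 */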
945 	for (i = 0; i < used_blocks; i++) {
946 		int image_seq;
947 
948 		pnum = be32_to_cpu(fmsb->block_loc[i]);
949 
950 		if (ubi_io_is_bad(ubi, pnum)) {
951 			ret = UBI_BAD_FASTMAP;
952 			goto free_hdr;
953 		}
954 
955 		if (i == 0 && pnum != fm_anchor) {
956 			ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
957 				pnum, fm_anchor);
958 			ret = UBI_BAD_FASTMAP;
959 			goto free_hdr;
960 		}
961 
962 		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
963 		if (ret && ret != UBI_IO_BITFLIPS) {
964 			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
965 				i, pnum);
966 			if (ret > 0)
967 				ret = UBI_BAD_FASTMAP;
968 			goto free_hdr;
969 		} else if (ret == UBI_IO_BITFLIPS)
970 			fm->to_be_tortured[i] = 1;
971 
972 		image_seq = be32_to_cpu(ech->image_seq);
973 		if (!ubi->image_seq)
974 			ubi->image_seq = image_seq;
975 
976 		/*
977 		 * Older UBI implementations have image_seq set to zero, so
978 		 * we shouldn't fail if image_seq == 0.
979 		 */
980 		if (image_seq && (image_seq != ubi->image_seq)) {
981 			ubi_err(ubi, "wrong image seq:%d instead of %d",
982 				be32_to_cpu(ech->image_seq), ubi->image_seq);
983 			ret = UBI_BAD_FASTMAP;
984 			goto free_hdr;
985 		}
986 
987 		ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
988 		if (ret && ret != UBI_IO_BITFLIPS) {
989 			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
990 				i, pnum);
991 			goto free_hdr;
992 		}
993 
994 		if (i == 0) {
995 			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
996 				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
997 					be32_to_cpu(vh->vol_id),
998 					UBI_FM_SB_VOLUME_ID);
999 				ret = UBI_BAD_FASTMAP;
1000 				goto free_hdr;
1001 			}
1002 		} else {
1003 			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
1004 				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
1005 					be32_to_cpu(vh->vol_id),
1006 					UBI_FM_DATA_VOLUME_ID);
1007 				ret = UBI_BAD_FASTMAP;
1008 				goto free_hdr;
1009 			}
1010 		}
1011 
1012 		if (sqnum < be64_to_cpu(vh->sqnum))
1013 			sqnum = be64_to_cpu(vh->sqnum);
1014 
1015 		ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
1016 				       pnum, 0, ubi->leb_size);
1017 		if (ret && ret != UBI_IO_BITFLIPS) {
1018 			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
1019 				"err: %i)", i, pnum, ret);
1020 			goto free_hdr;
1021 		}
1022 	}
1023 
1024 	kfree(fmsb);
1025 	fmsb = NULL;
1026 
1027 	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
1028 	tmp_crc = be32_to_cpu(fmsb2->data_crc);
1029 	fmsb2->data_crc = 0;
1030 	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
1031 	if (crc != tmp_crc) {
1032 		ubi_err(ubi, "fastmap data CRC is invalid");
1033 		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
1034 			tmp_crc, crc);
1035 		ret = UBI_BAD_FASTMAP;
1036 		goto free_hdr;
1037 	}
1038 
1039 	fmsb2->sqnum = sqnum;
1040 
1041 	fm->used_blocks = used_blocks;
1042 
1043 	ret = ubi_attach_fastmap(ubi, ai, fm);
1044 	if (ret) {
1045 		if (ret > 0)
1046 			ret = UBI_BAD_FASTMAP;
1047 		goto free_hdr;
1048 	}
1049 
1050 	for (i = 0; i < used_blocks; i++) {
1051 		struct ubi_wl_entry *e;
1052 
1053 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1054 		if (!e) {
1055 			while (i--)
1056 				kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
1057 
1058 			ret = -ENOMEM;
1059 			goto free_hdr;
1060 		}
1061 
1062 		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1063 		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1064 		fm->e[i] = e;
1065 	}
1066 
1067 	ubi->fm = fm;
1068 	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1069 	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1070 	ubi_msg(ubi, "attached by fastmap");
1071 	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
1072 	ubi_msg(ubi, "fastmap WL pool size: %d",
1073 		ubi->fm_wl_pool.max_size);
1074 	ubi->fm_disabled = 0;
1075 	ubi->fast_attach = 1;
1076 
1077 	ubi_free_vid_buf(vb);
1078 	kfree(ech);
1079 out:
1080 	up_write(&ubi->fm_protect);
1081 	if (ret == UBI_BAD_FASTMAP)
1082 		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
1083 	return ret;
1084 
1085 free_hdr:
1086 	ubi_free_vid_buf(vb);
1087 	kfree(ech);
1088 free_fm_sb:
1089 	kfree(fmsb);
1090 	kfree(fm);
1091 	goto out;
1092 }
1093 
1094 int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
1095 {
1096 	struct ubi_device *ubi = vol->ubi;
1097 
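	/*
	 * The checkmap is only needed after a fastmap attach: a full scan has
	 * already read every VID header, whereas fastmap trusts the on-flash
	 * tables and verifies the recorded mappings lazily on first use.
	 */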
1098 	if (!ubi->fast_attach)
1099 		return 0;
1100 
1101 	vol->checkmap = kcalloc(BITS_TO_LONGS(leb_count), sizeof(unsigned long),
1102 				GFP_KERNEL);
1103 	if (!vol->checkmap)
1104 		return -ENOMEM;
1105 
1106 	return 0;
1107 }
1108 
1109 void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
1110 {
1111 	kfree(vol->checkmap);
1112 }
1113 
1114 /**
1115  * ubi_write_fastmap - writes a fastmap.
1116  * @ubi: UBI device object
1117  * @new_fm: the fastmap to be written
1118  *
1119  * Returns 0 on success, < 0 indicates an internal error.
1120  */
1121 static int ubi_write_fastmap(struct ubi_device *ubi,
1122 			     struct ubi_fastmap_layout *new_fm)
1123 {
1124 	size_t fm_pos = 0;
1125 	void *fm_raw;
1126 	struct ubi_fm_sb *fmsb;
1127 	struct ubi_fm_hdr *fmh;
1128 	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
1129 	struct ubi_fm_ec *fec;
1130 	struct ubi_fm_volhdr *fvh;
1131 	struct ubi_fm_eba *feba;
1132 	struct ubi_wl_entry *wl_e;
1133 	struct ubi_volume *vol;
1134 	struct ubi_vid_io_buf *avbuf, *dvbuf;
1135 	struct ubi_vid_hdr *avhdr, *dvhdr;
1136 	struct ubi_work *ubi_wrk;
1137 	struct rb_node *tmp_rb;
1138 	int ret, i, j, free_peb_count, used_peb_count, vol_count;
1139 	int scrub_peb_count, erase_peb_count;
1140 	unsigned long *seen_pebs = NULL;
1141 
1142 	fm_raw = ubi->fm_buf;
1143 	memset(ubi->fm_buf, 0, ubi->fm_size);
1144 
1145 	avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1146 	if (!avbuf) {
1147 		ret = -ENOMEM;
1148 		goto out;
1149 	}
1150 
1151 	dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
1152 	if (!dvbuf) {
1153 		ret = -ENOMEM;
1154 		goto out_kfree;
1155 	}
1156 
1157 	avhdr = ubi_get_vid_hdr(avbuf);
1158 	dvhdr = ubi_get_vid_hdr(dvbuf);
1159 
1160 	seen_pebs = init_seen(ubi);
1161 	if (IS_ERR(seen_pebs)) {
1162 		ret = PTR_ERR(seen_pebs);
1163 		goto out_kfree;
1164 	}
1165 
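	/*
	 * Hold both locks while the in-memory state (pools, WL trees, work
	 * queue and EBA tables) is serialized into fm_buf so that the
	 * snapshot is consistent.  The flash writes below happen after the
	 * locks have been dropped again.
	 */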
1166 	spin_lock(&ubi->volumes_lock);
1167 	spin_lock(&ubi->wl_lock);
1168 
1169 	fmsb = (struct ubi_fm_sb *)fm_raw;
1170 	fm_pos += sizeof(*fmsb);
1171 	ubi_assert(fm_pos <= ubi->fm_size);
1172 
1173 	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1174 	fm_pos += sizeof(*fmh);
1175 	ubi_assert(fm_pos <= ubi->fm_size);
1176 
1177 	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1178 	fmsb->version = UBI_FM_FMT_VERSION;
1179 	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1180 	/* the max sqnum will be filled in while *reading* the fastmap */
1181 	fmsb->sqnum = 0;
1182 
1183 	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1184 	free_peb_count = 0;
1185 	used_peb_count = 0;
1186 	scrub_peb_count = 0;
1187 	erase_peb_count = 0;
1188 	vol_count = 0;
1189 
1190 	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1191 	fm_pos += sizeof(*fmpl);
1192 	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1193 	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
1194 	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1195 
1196 	for (i = 0; i < ubi->fm_pool.size; i++) {
1197 		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1198 		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
1199 	}
1200 
1201 	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1202 	fm_pos += sizeof(*fmpl_wl);
1203 	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1204 	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
1205 	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1206 
1207 	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
1208 		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1209 		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
1210 	}
1211 
1212 	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
1213 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1214 
1215 		fec->pnum = cpu_to_be32(wl_e->pnum);
1216 		set_seen(ubi, wl_e->pnum, seen_pebs);
1217 		fec->ec = cpu_to_be32(wl_e->ec);
1218 
1219 		free_peb_count++;
1220 		fm_pos += sizeof(*fec);
1221 		ubi_assert(fm_pos <= ubi->fm_size);
1222 	}
1223 	fmh->free_peb_count = cpu_to_be32(free_peb_count);
1224 
1225 	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
1226 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1227 
1228 		fec->pnum = cpu_to_be32(wl_e->pnum);
1229 		set_seen(ubi, wl_e->pnum, seen_pebs);
1230 		fec->ec = cpu_to_be32(wl_e->ec);
1231 
1232 		used_peb_count++;
1233 		fm_pos += sizeof(*fec);
1234 		ubi_assert(fm_pos <= ubi->fm_size);
1235 	}
1236 
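	/*
	 * PEBs sitting in the protection queue are still in use from the
	 * fastmap's point of view, so account them in the used list as well.
	 */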
1237 	ubi_for_each_protected_peb(ubi, i, wl_e) {
1238 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1239 
1240 		fec->pnum = cpu_to_be32(wl_e->pnum);
1241 		set_seen(ubi, wl_e->pnum, seen_pebs);
1242 		fec->ec = cpu_to_be32(wl_e->ec);
1243 
1244 		used_peb_count++;
1245 		fm_pos += sizeof(*fec);
1246 		ubi_assert(fm_pos <= ubi->fm_size);
1247 	}
1248 	fmh->used_peb_count = cpu_to_be32(used_peb_count);
1249 
1250 	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
1251 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1252 
1253 		fec->pnum = cpu_to_be32(wl_e->pnum);
1254 		set_seen(ubi, wl_e->pnum, seen_pebs);
1255 		fec->ec = cpu_to_be32(wl_e->ec);
1256 
1257 		scrub_peb_count++;
1258 		fm_pos += sizeof(*fec);
1259 		ubi_assert(fm_pos <= ubi->fm_size);
1260 	}
1261 	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1262 
1263 
1264 	list_for_each_entry(ubi_wrk, &ubi->works, list) {
1265 		if (ubi_is_erase_work(ubi_wrk)) {
1266 			wl_e = ubi_wrk->e;
1267 			ubi_assert(wl_e);
1268 
1269 			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1270 
1271 			fec->pnum = cpu_to_be32(wl_e->pnum);
1272 			set_seen(ubi, wl_e->pnum, seen_pebs);
1273 			fec->ec = cpu_to_be32(wl_e->ec);
1274 
1275 			erase_peb_count++;
1276 			fm_pos += sizeof(*fec);
1277 			ubi_assert(fm_pos <= ubi->fm_size);
1278 		}
1279 	}
1280 	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1281 
1282 	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1283 		vol = ubi->volumes[i];
1284 
1285 		if (!vol)
1286 			continue;
1287 
1288 		vol_count++;
1289 
1290 		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1291 		fm_pos += sizeof(*fvh);
1292 		ubi_assert(fm_pos <= ubi->fm_size);
1293 
1294 		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1295 		fvh->vol_id = cpu_to_be32(vol->vol_id);
1296 		fvh->vol_type = vol->vol_type;
1297 		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1298 		fvh->data_pad = cpu_to_be32(vol->data_pad);
1299 		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1300 
1301 		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1302 			vol->vol_type == UBI_STATIC_VOLUME);
1303 
1304 		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1305 		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1306 		ubi_assert(fm_pos <= ubi->fm_size);
1307 
1308 		for (j = 0; j < vol->reserved_pebs; j++) {
1309 			struct ubi_eba_leb_desc ldesc;
1310 
1311 			ubi_eba_get_ldesc(vol, j, &ldesc);
1312 			feba->pnum[j] = cpu_to_be32(ldesc.pnum);
1313 		}
1314 
1315 		feba->reserved_pebs = cpu_to_be32(j);
1316 		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1317 	}
1318 	fmh->vol_count = cpu_to_be32(vol_count);
1319 	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1320 
1321 	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1322 	avhdr->lnum = 0;
1323 
1324 	spin_unlock(&ubi->wl_lock);
1325 	spin_unlock(&ubi->volumes_lock);
1326 
1327 	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1328 	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
1329 	if (ret) {
1330 		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
1331 		goto out_kfree;
1332 	}
1333 
1334 	for (i = 0; i < new_fm->used_blocks; i++) {
1335 		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1336 		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
1337 		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1338 	}
1339 
1340 	fmsb->data_crc = 0;
1341 	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1342 					   ubi->fm_size));
1343 
1344 	for (i = 1; i < new_fm->used_blocks; i++) {
1345 		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1346 		dvhdr->lnum = cpu_to_be32(i);
1347 		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1348 			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1349 		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
1350 		if (ret) {
1351 			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
1352 				new_fm->e[i]->pnum);
1353 			goto out_kfree;
1354 		}
1355 	}
1356 
1357 	for (i = 0; i < new_fm->used_blocks; i++) {
1358 		ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
1359 					new_fm->e[i]->pnum, 0, ubi->leb_size);
1360 		if (ret) {
1361 			ubi_err(ubi, "unable to write fastmap to PEB %i!",
1362 				new_fm->e[i]->pnum);
1363 			goto out_kfree;
1364 		}
1365 	}
1366 
1367 	ubi_assert(new_fm);
1368 	ubi->fm = new_fm;
1369 
1370 	ret = self_check_seen(ubi, seen_pebs);
1371 	dbg_bld("fastmap written!");
1372 
1373 out_kfree:
1374 	ubi_free_vid_buf(avbuf);
1375 	ubi_free_vid_buf(dvbuf);
1376 	free_seen(seen_pebs);
1377 out:
1378 	return ret;
1379 }
1380 
1381 /**
1382  * erase_block - Manually erase a PEB.
1383  * @ubi: UBI device object
1384  * @pnum: PEB to be erased
1385  *
1386  * Returns the new EC value on success, < 0 indicates an internal error.
1387  */
1388 static int erase_block(struct ubi_device *ubi, int pnum)
1389 {
1390 	int ret;
1391 	struct ubi_ec_hdr *ec_hdr;
1392 	long long ec;
1393 
1394 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1395 	if (!ec_hdr)
1396 		return -ENOMEM;
1397 
1398 	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1399 	if (ret < 0)
1400 		goto out;
1401 	else if (ret && ret != UBI_IO_BITFLIPS) {
1402 		ret = -EINVAL;
1403 		goto out;
1404 	}
1405 
1406 	ret = ubi_io_sync_erase(ubi, pnum, 0);
1407 	if (ret < 0)
1408 		goto out;
1409 
1410 	ec = be64_to_cpu(ec_hdr->ec);
1411 	ec += ret;
1412 	if (ec > UBI_MAX_ERASECOUNTER) {
1413 		ret = -EINVAL;
1414 		goto out;
1415 	}
1416 
1417 	ec_hdr->ec = cpu_to_be64(ec);
1418 	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1419 	if (ret < 0)
1420 		goto out;
1421 
1422 	ret = ec;
1423 out:
1424 	kfree(ec_hdr);
1425 	return ret;
1426 }
1427 
1428 /**
1429  * invalidate_fastmap - destroys a fastmap.
1430  * @ubi: UBI device object
1431  *
1432  * This function ensures that upon next UBI attach a full scan
1433  * is issued. We need this if UBI is about to write a new fastmap
1434  * but is unable to do so. In this case we have two options:
1435  * a) Make sure that the current fastmap will not be used upon
1436  * attach time and continue, or b) fall back to RO mode to keep the
1437  * current fastmap in a valid state.
1438  * Returns 0 on success, < 0 indicates an internal error.
1439  */
1440 static int invalidate_fastmap(struct ubi_device *ubi)
1441 {
1442 	int ret;
1443 	struct ubi_fastmap_layout *fm;
1444 	struct ubi_wl_entry *e;
1445 	struct ubi_vid_io_buf *vb = NULL;
1446 	struct ubi_vid_hdr *vh;
1447 
1448 	if (!ubi->fm)
1449 		return 0;
1450 
1451 	ubi->fm = NULL;
1452 
1453 	ret = -ENOMEM;
1454 	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
1455 	if (!fm)
1456 		goto out;
1457 
1458 	vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1459 	if (!vb)
1460 		goto out_free_fm;
1461 
1462 	vh = ubi_get_vid_hdr(vb);
1463 
1464 	ret = -ENOSPC;
1465 	e = ubi_wl_get_fm_peb(ubi, 1);
1466 	if (!e)
1467 		goto out_free_fm;
1468 
1469 	/*
1470 	 * Create a fake fastmap such that UBI will fall back
1471 	 * to scanning mode.
1472 	 */
1473 	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1474 	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
1475 	if (ret < 0) {
1476 		ubi_wl_put_fm_peb(ubi, e, 0, 0);
1477 		goto out_free_fm;
1478 	}
1479 
1480 	fm->used_blocks = 1;
1481 	fm->e[0] = e;
1482 
1483 	ubi->fm = fm;
1484 
1485 out:
1486 	ubi_free_vid_buf(vb);
1487 	return ret;
1488 
1489 out_free_fm:
1490 	kfree(fm);
1491 	goto out;
1492 }
1493 
1494 /**
1495  * return_fm_pebs - returns all PEBs used by a fastmap back to the
1496  * WL sub-system.
1497  * @ubi: UBI device object
1498  * @fm: fastmap layout object
1499  */
1500 static void return_fm_pebs(struct ubi_device *ubi,
1501 			   struct ubi_fastmap_layout *fm)
1502 {
1503 	int i;
1504 
1505 	if (!fm)
1506 		return;
1507 
1508 	for (i = 0; i < fm->used_blocks; i++) {
1509 		if (fm->e[i]) {
1510 			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
1511 					  fm->to_be_tortured[i]);
1512 			fm->e[i] = NULL;
1513 		}
1514 	}
1515 }
1516 
1517 /**
1518  * ubi_update_fastmap - will be called by UBI if a volume changes or
1519  * a fastmap pool becomes full.
1520  * @ubi: UBI device object
1521  *
1522  * Returns 0 on success, < 0 indicates an internal error.
1523  */
1524 int ubi_update_fastmap(struct ubi_device *ubi)
1525 {
1526 	int ret, i, j;
1527 	struct ubi_fastmap_layout *new_fm, *old_fm;
1528 	struct ubi_wl_entry *tmp_e;
1529 
1530 	down_write(&ubi->fm_protect);
1531 	down_write(&ubi->work_sem);
1532 	down_write(&ubi->fm_eba_sem);
1533 
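	/*
	 * Refill both pools first so that the fastmap written below describes
	 * freshly filled pools rather than the partially drained old ones.
	 */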
1534 	ubi_refill_pools(ubi);
1535 
1536 	if (ubi->ro_mode || ubi->fm_disabled) {
1537 		up_write(&ubi->fm_eba_sem);
1538 		up_write(&ubi->work_sem);
1539 		up_write(&ubi->fm_protect);
1540 		return 0;
1541 	}
1542 
1543 	ret = ubi_ensure_anchor_pebs(ubi);
1544 	if (ret) {
1545 		up_write(&ubi->fm_eba_sem);
1546 		up_write(&ubi->work_sem);
1547 		up_write(&ubi->fm_protect);
1548 		return ret;
1549 	}
1550 
1551 	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1552 	if (!new_fm) {
1553 		up_write(&ubi->fm_eba_sem);
1554 		up_write(&ubi->work_sem);
1555 		up_write(&ubi->fm_protect);
1556 		return -ENOMEM;
1557 	}
1558 
1559 	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1560 	old_fm = ubi->fm;
1561 	ubi->fm = NULL;
1562 
1563 	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1564 		ubi_err(ubi, "fastmap too large");
1565 		ret = -ENOSPC;
1566 		goto err;
1567 	}
1568 
1569 	for (i = 1; i < new_fm->used_blocks; i++) {
1570 		spin_lock(&ubi->wl_lock);
1571 		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1572 		spin_unlock(&ubi->wl_lock);
1573 
1574 		if (!tmp_e) {
1575 			if (old_fm && old_fm->e[i]) {
1576 				ret = erase_block(ubi, old_fm->e[i]->pnum);
1577 				if (ret < 0) {
1578 					ubi_err(ubi, "could not erase old fastmap PEB");
1579 
1580 					for (j = 1; j < i; j++) {
1581 						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1582 								  j, 0);
1583 						new_fm->e[j] = NULL;
1584 					}
1585 					goto err;
1586 				}
1587 				new_fm->e[i] = old_fm->e[i];
1588 				old_fm->e[i] = NULL;
1589 			} else {
1590 				ubi_err(ubi, "could not get any free erase block");
1591 
1592 				for (j = 1; j < i; j++) {
1593 					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1594 					new_fm->e[j] = NULL;
1595 				}
1596 
1597 				ret = -ENOSPC;
1598 				goto err;
1599 			}
1600 		} else {
1601 			new_fm->e[i] = tmp_e;
1602 
1603 			if (old_fm && old_fm->e[i]) {
1604 				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1605 						  old_fm->to_be_tortured[i]);
1606 				old_fm->e[i] = NULL;
1607 			}
1608 		}
1609 	}
1610 
1611 	/* Old fastmap is larger than the new one */
1612 	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
1613 		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
1614 			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1615 					  old_fm->to_be_tortured[i]);
1616 			old_fm->e[i] = NULL;
1617 		}
1618 	}
1619 
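	/*
	 * Now take care of the anchor PEB (fastmap block 0).  It has to come
	 * from the anchor area; if no fresh anchor PEB is available the old
	 * one is erased and reused.
	 */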
1620 	spin_lock(&ubi->wl_lock);
1621 	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
1622 	spin_unlock(&ubi->wl_lock);
1623 
1624 	if (old_fm) {
1625 		/* no fresh anchor PEB was found, reuse the old one */
1626 		if (!tmp_e) {
1627 			ret = erase_block(ubi, old_fm->e[0]->pnum);
1628 			if (ret < 0) {
1629 				ubi_err(ubi, "could not erase old anchor PEB");
1630 
1631 				for (i = 1; i < new_fm->used_blocks; i++) {
1632 					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1633 							  i, 0);
1634 					new_fm->e[i] = NULL;
1635 				}
1636 				goto err;
1637 			}
1638 			new_fm->e[0] = old_fm->e[0];
1639 			new_fm->e[0]->ec = ret;
1640 			old_fm->e[0] = NULL;
1641 		} else {
1642 			/* we've got a new anchor PEB, return the old one */
1643 			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1644 					  old_fm->to_be_tortured[0]);
1645 			new_fm->e[0] = tmp_e;
1646 			old_fm->e[0] = NULL;
1647 		}
1648 	} else {
1649 		if (!tmp_e) {
1650 			ubi_err(ubi, "could not find any anchor PEB");
1651 
1652 			for (i = 1; i < new_fm->used_blocks; i++) {
1653 				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1654 				new_fm->e[i] = NULL;
1655 			}
1656 
1657 			ret = -ENOSPC;
1658 			goto err;
1659 		}
1660 		new_fm->e[0] = tmp_e;
1661 	}
1662 
1663 	ret = ubi_write_fastmap(ubi, new_fm);
1664 
1665 	if (ret)
1666 		goto err;
1667 
1668 out_unlock:
1669 	up_write(&ubi->fm_eba_sem);
1670 	up_write(&ubi->work_sem);
1671 	up_write(&ubi->fm_protect);
1672 	kfree(old_fm);
1673 	return ret;
1674 
1675 err:
1676 	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
1677 
1678 	ret = invalidate_fastmap(ubi);
1679 	if (ret < 0) {
1680 		ubi_err(ubi, "Unable to invalidate current fastmap!");
1681 		ubi_ro_mode(ubi);
1682 	} else {
1683 		return_fm_pebs(ubi, old_fm);
1684 		return_fm_pebs(ubi, new_fm);
1685 		ret = 0;
1686 	}
1687 
1688 	kfree(new_fm);
1689 	goto out_unlock;
1690 }
1691