xref: /openbmc/linux/drivers/mtd/ubi/fastmap.c (revision 905e46ac)
1 /*
2  * Copyright (c) 2012 Linutronix GmbH
3  * Copyright (c) 2014 sigma star gmbh
4  * Author: Richard Weinberger <richard@nod.at>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; version 2.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13  * the GNU General Public License for more details.
14  *
15  */
16 
17 #include <linux/crc32.h>
18 #include <linux/bitmap.h>
19 #include "ubi.h"
20 
21 /**
22  * init_seen - allocate a PEB bitmap used for debugging.
23  * @ubi: UBI device description object
24  */
25 static inline unsigned long *init_seen(struct ubi_device *ubi)
26 {
27 	unsigned long *ret;
28 
29 	if (!ubi_dbg_chk_fastmap(ubi))
30 		return NULL;
31 
32 	ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
33 		      GFP_KERNEL);
34 	if (!ret)
35 		return ERR_PTR(-ENOMEM);
36 
37 	return ret;
38 }
39 
40 /**
41  * free_seen - free the seen logic bitmap.
42  * @seen: bitmap of @ubi->peb_count bits
43  */
44 static inline void free_seen(unsigned long *seen)
45 {
46 	kfree(seen);
47 }
48 
49 /**
50  * set_seen - mark a PEB as seen.
51  * @ubi: UBI device description object
52  * @pnum: The PEB to be marked as seen
53  * @seen: bitmap of @ubi->peb_count bits
54  */
55 static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
56 {
57 	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
58 		return;
59 
60 	set_bit(pnum, seen);
61 }
62 
63 /**
64  * self_check_seen - check whether all PEBs have been seen by fastmap.
65  * @ubi: UBI device description object
66  * @seen: bitmap of @ubi->peb_count bits
67  */
68 static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
69 {
70 	int pnum, ret = 0;
71 
72 	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
73 		return 0;
74 
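	/*
	 * A PEB that has an entry in the lookup table but was never
	 * marked via set_seen() was missed by the fastmap writer.
	 */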
75 	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
76 		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
77 			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
78 			ret = -EINVAL;
79 		}
80 	}
81 
82 	return ret;
83 }
84 
85 /**
86  * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
87  * @ubi: UBI device description object
88  */
89 size_t ubi_calc_fm_size(struct ubi_device *ubi)
90 {
91 	size_t size;
92 
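	/*
	 * Worst case layout: super block, fastmap header, the two scan
	 * pools, one EC entry per PEB, one EBA header plus one __be32
	 * mapping slot per PEB, and a volume header for every possible
	 * volume. The result is rounded up to whole LEBs.
	 */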
93 	size = sizeof(struct ubi_fm_sb) +
94 		sizeof(struct ubi_fm_hdr) +
95 		sizeof(struct ubi_fm_scan_pool) +
96 		sizeof(struct ubi_fm_scan_pool) +
97 		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
98 		(sizeof(struct ubi_fm_eba) +
99 		(ubi->peb_count * sizeof(__be32))) +
100 		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
101 	return roundup(size, ubi->leb_size);
102 }
103 
104 
105 /**
106  * new_fm_vbuf - allocate a new volume header buffer for fastmap usage.
107  * @ubi: UBI device description object
108  * @vol_id: the VID of the new header
109  *
110  * Returns a new struct ubi_vid_io_buf on success.
111  * NULL indicates out of memory.
112  */
113 static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
114 {
115 	struct ubi_vid_io_buf *new;
116 	struct ubi_vid_hdr *vh;
117 
118 	new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
119 	if (!new)
120 		goto out;
121 
122 	vh = ubi_get_vid_hdr(new);
123 	vh->vol_type = UBI_VID_DYNAMIC;
124 	vh->vol_id = cpu_to_be32(vol_id);
125 
126 	/* UBI implementations without fastmap support have to delete the
127 	 * fastmap.
128 	 */
129 	vh->compat = UBI_COMPAT_DELETE;
130 
131 out:
132 	return new;
133 }
134 
135 /**
136  * add_aeb - create and add an attach erase block to a given list.
137  * @ai: UBI attach info object
138  * @list: the target list
139  * @pnum: PEB number of the new attach erase block
140  * @ec: erase counter of the new PEB
141  * @scrub: scrub this PEB after attaching
142  *
143  * Returns 0 on success, < 0 indicates an internal error.
144  */
145 static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
146 		   int pnum, int ec, int scrub)
147 {
148 	struct ubi_ainf_peb *aeb;
149 
150 	aeb = ubi_alloc_aeb(ai, pnum, ec);
151 	if (!aeb)
152 		return -ENOMEM;
153 
154 	aeb->lnum = -1;
155 	aeb->scrub = scrub;
156 	aeb->copy_flag = aeb->sqnum = 0;
157 
158 	ai->ec_sum += aeb->ec;
159 	ai->ec_count++;
160 
161 	if (ai->max_ec < aeb->ec)
162 		ai->max_ec = aeb->ec;
163 
164 	if (ai->min_ec > aeb->ec)
165 		ai->min_ec = aeb->ec;
166 
167 	list_add_tail(&aeb->u.list, list);
168 
169 	return 0;
170 }
171 
172 /**
173  * add_vol - create and add a new volume to ubi_attach_info.
174  * @ai: ubi_attach_info object
175  * @vol_id: VID of the new volume
176  * @used_ebs: number of used EBs
177  * @data_pad: data padding value of the new volume
178  * @vol_type: volume type
179  * @last_eb_bytes: number of bytes in the last LEB
180  *
181  * Returns the new struct ubi_ainf_volume on success.
182  * NULL indicates an error.
183  */
184 static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
185 				       int used_ebs, int data_pad, u8 vol_type,
186 				       int last_eb_bytes)
187 {
188 	struct ubi_ainf_volume *av;
189 
190 	av = ubi_add_av(ai, vol_id);
191 	if (IS_ERR(av))
192 		return av;
193 
194 	av->data_pad = data_pad;
195 	av->last_data_size = last_eb_bytes;
196 	av->compat = 0;
197 	av->vol_type = vol_type;
198 	if (av->vol_type == UBI_STATIC_VOLUME)
199 		av->used_ebs = used_ebs;
200 
201 	dbg_bld("found volume (ID %i)", vol_id);
202 	return av;
203 }
204 
205 /**
206  * assign_aeb_to_av - assigns an AEB to a given ainf_volume and removes it
207  * from its original list.
208  * @ai: ubi_attach_info object
209  * @aeb: the AEB to be assigned
210  * @av: target scan volume
211  */
212 static void assign_aeb_to_av(struct ubi_attach_info *ai,
213 			     struct ubi_ainf_peb *aeb,
214 			     struct ubi_ainf_volume *av)
215 {
216 	struct ubi_ainf_peb *tmp_aeb;
217 	struct rb_node **p = &av->root.rb_node;
218 	struct rb_node *parent = NULL;
219 
220 	while (*p) {
221 		parent = *p;
222 
223 		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
224 		if (aeb->lnum != tmp_aeb->lnum) {
225 			if (aeb->lnum < tmp_aeb->lnum)
226 				p = &(*p)->rb_left;
227 			else
228 				p = &(*p)->rb_right;
229 
230 			continue;
231 		} else
232 			break;
233 	}
234 
235 	list_del(&aeb->u.list);
236 	av->leb_count++;
237 
238 	rb_link_node(&aeb->u.rb, parent, p);
239 	rb_insert_color(&aeb->u.rb, &av->root);
240 }
241 
242 /**
243  * update_vol - inserts or updates a LEB which was found in a pool.
244  * @ubi: the UBI device object
245  * @ai: attach info object
246  * @av: the volume this LEB belongs to
247  * @new_vh: the volume header derived from new_aeb
248  * @new_aeb: the AEB to be examined
249  *
250  * Returns 0 on success, < 0 indicates an internal error.
251  */
252 static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
253 		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
254 		      struct ubi_ainf_peb *new_aeb)
255 {
256 	struct rb_node **p = &av->root.rb_node, *parent = NULL;
257 	struct ubi_ainf_peb *aeb, *victim;
258 	int cmp_res;
259 
260 	while (*p) {
261 		parent = *p;
262 		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
263 
264 		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
265 			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
266 				p = &(*p)->rb_left;
267 			else
268 				p = &(*p)->rb_right;
269 
270 			continue;
271 		}
272 
273 		/* This case can happen if the fastmap gets written
274 		 * because of a volume change (creation, deletion, ..).
275 		 * Then a PEB can be within the persistent EBA and the pool.
276 		 */
277 		if (aeb->pnum == new_aeb->pnum) {
278 			ubi_assert(aeb->lnum == new_aeb->lnum);
279 			ubi_free_aeb(ai, new_aeb);
280 
281 			return 0;
282 		}
283 
284 		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
285 		if (cmp_res < 0)
286 			return cmp_res;
287 
288 		/* new_aeb is newer */
289 		if (cmp_res & 1) {
290 			victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
291 			if (!victim)
292 				return -ENOMEM;
293 
294 			list_add_tail(&victim->u.list, &ai->erase);
295 
296 			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
297 				av->last_data_size =
298 					be32_to_cpu(new_vh->data_size);
299 
300 			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
301 				av->vol_id, aeb->lnum, new_aeb->pnum);
302 
303 			aeb->ec = new_aeb->ec;
304 			aeb->pnum = new_aeb->pnum;
305 			aeb->copy_flag = new_vh->copy_flag;
306 			aeb->scrub = new_aeb->scrub;
307 			aeb->sqnum = new_aeb->sqnum;
308 			ubi_free_aeb(ai, new_aeb);
309 
310 		/* new_aeb is older */
311 		} else {
312 			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
313 				av->vol_id, aeb->lnum, new_aeb->pnum);
314 			list_add_tail(&new_aeb->u.list, &ai->erase);
315 		}
316 
317 		return 0;
318 	}
319 	/* This LEB is new, let's add it to the volume */
320 
321 	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
322 		av->highest_lnum = be32_to_cpu(new_vh->lnum);
323 		av->last_data_size = be32_to_cpu(new_vh->data_size);
324 	}
325 
326 	if (av->vol_type == UBI_STATIC_VOLUME)
327 		av->used_ebs = be32_to_cpu(new_vh->used_ebs);
328 
329 	av->leb_count++;
330 
331 	rb_link_node(&new_aeb->u.rb, parent, p);
332 	rb_insert_color(&new_aeb->u.rb, &av->root);
333 
334 	return 0;
335 }
336 
337 /**
338  * process_pool_aeb - process a non-empty PEB found in a pool.
339  * @ubi: UBI device object
340  * @ai: attach info object
341  * @new_vh: the volume header derived from new_aeb
342  * @new_aeb: the AEB to be examined
343  *
344  * Returns 0 on success, < 0 indicates an internal error.
345  */
346 static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
347 			    struct ubi_vid_hdr *new_vh,
348 			    struct ubi_ainf_peb *new_aeb)
349 {
350 	int vol_id = be32_to_cpu(new_vh->vol_id);
351 	struct ubi_ainf_volume *av;
352 
353 	if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
354 		ubi_free_aeb(ai, new_aeb);
355 
356 		return 0;
357 	}
358 
359 	/* Find the volume this SEB belongs to */
360 	av = ubi_find_av(ai, vol_id);
361 	if (!av) {
362 		ubi_err(ubi, "orphaned volume in fastmap pool!");
363 		ubi_free_aeb(ai, new_aeb);
364 		return UBI_BAD_FASTMAP;
365 	}
366 
367 	ubi_assert(vol_id == av->vol_id);
368 
369 	return update_vol(ubi, ai, av, new_vh, new_aeb);
370 }
371 
372 /**
373  * unmap_peb - unmap a PEB.
374  * @ai: UBI attach info object
375  * @pnum: The PEB to be unmapped
376  *
377  * If fastmap detects a free PEB in the pool it has to check whether
378  * this PEB has been unmapped after writing the fastmap.
379  */
380 static void unmap_peb(struct ubi_attach_info *ai, int pnum)
381 {
382 	struct ubi_ainf_volume *av;
383 	struct rb_node *node, *node2;
384 	struct ubi_ainf_peb *aeb;
385 
386 	ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
387 		ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
388 			if (aeb->pnum == pnum) {
389 				rb_erase(&aeb->u.rb, &av->root);
390 				av->leb_count--;
391 				ubi_free_aeb(ai, aeb);
392 				return;
393 			}
394 		}
395 	}
396 }
397 
398 /**
399  * scan_pool - scans a pool for changed (no longer empty) PEBs.
400  * @ubi: UBI device object
401  * @ai: attach info object
402  * @pebs: an array of all PEB numbers in the pool to be scanned
403  * @pool_size: size of the pool (number of entries in @pebs)
404  * @max_sqnum: pointer to the maximal sequence number
405  * @free: list of PEBs which are most likely free (and go into @ai->free)
406  *
407  * Returns 0 on success, UBI_BAD_FASTMAP if the pool is unusable.
408  * < 0 indicates an internal error.
409  */
410 static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
411 		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
412 		     struct list_head *free)
413 {
414 	struct ubi_vid_io_buf *vb;
415 	struct ubi_vid_hdr *vh;
416 	struct ubi_ec_hdr *ech;
417 	struct ubi_ainf_peb *new_aeb;
418 	int i, pnum, err, ret = 0;
419 
420 	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
421 	if (!ech)
422 		return -ENOMEM;
423 
424 	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
425 	if (!vb) {
426 		kfree(ech);
427 		return -ENOMEM;
428 	}
429 
430 	vh = ubi_get_vid_hdr(vb);
431 
432 	dbg_bld("scanning fastmap pool: size = %i", pool_size);
433 
434 	/*
435 	 * Now scan all PEBs in the pool to find changes which have been made
436 	 * after the creation of the fastmap
437 	 */
438 	for (i = 0; i < pool_size; i++) {
439 		int scrub = 0;
440 		int image_seq;
441 
442 		pnum = be32_to_cpu(pebs[i]);
443 
444 		if (ubi_io_is_bad(ubi, pnum)) {
445 			ubi_err(ubi, "bad PEB in fastmap pool!");
446 			ret = UBI_BAD_FASTMAP;
447 			goto out;
448 		}
449 
450 		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
451 		if (err && err != UBI_IO_BITFLIPS) {
452 			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
453 				pnum, err);
454 			ret = err > 0 ? UBI_BAD_FASTMAP : err;
455 			goto out;
456 		} else if (err == UBI_IO_BITFLIPS)
457 			scrub = 1;
458 
459 		/*
460 		 * Older UBI implementations have image_seq set to zero, so
461 		 * we shouldn't fail if image_seq == 0.
462 		 */
463 		image_seq = be32_to_cpu(ech->image_seq);
464 
465 		if (image_seq && (image_seq != ubi->image_seq)) {
466 			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
467 				be32_to_cpu(ech->image_seq), ubi->image_seq);
468 			ret = UBI_BAD_FASTMAP;
469 			goto out;
470 		}
471 
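		/*
		 * An all-0xFF VID header means the PEB is (still) free or was
		 * unmapped after the fastmap was written; a readable header
		 * means new data was written into this pool PEB.
		 */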
472 		err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
473 		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
474 			unsigned long long ec = be64_to_cpu(ech->ec);
475 			unmap_peb(ai, pnum);
476 			dbg_bld("Adding PEB to free: %i", pnum);
477 
478 			if (err == UBI_IO_FF_BITFLIPS)
479 				scrub = 1;
480 
481 			add_aeb(ai, free, pnum, ec, scrub);
482 			continue;
483 		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
484 			dbg_bld("Found non empty PEB:%i in pool", pnum);
485 
486 			if (err == UBI_IO_BITFLIPS)
487 				scrub = 1;
488 
489 			new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
490 			if (!new_aeb) {
491 				ret = -ENOMEM;
492 				goto out;
493 			}
494 
495 			new_aeb->lnum = be32_to_cpu(vh->lnum);
496 			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
497 			new_aeb->copy_flag = vh->copy_flag;
498 			new_aeb->scrub = scrub;
499 
500 			if (*max_sqnum < new_aeb->sqnum)
501 				*max_sqnum = new_aeb->sqnum;
502 
503 			err = process_pool_aeb(ubi, ai, vh, new_aeb);
504 			if (err) {
505 				ret = err > 0 ? UBI_BAD_FASTMAP : err;
506 				goto out;
507 			}
508 		} else {
509 			/* We are paranoid and fall back to scanning mode */
510 			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
511 			ret = err > 0 ? UBI_BAD_FASTMAP : err;
512 			goto out;
513 		}
514 
515 	}
516 
517 out:
518 	ubi_free_vid_buf(vb);
519 	kfree(ech);
520 	return ret;
521 }
522 
523 /**
524  * count_fastmap_pebs - Counts the PEBs found by fastmap.
525  * @ai: The UBI attach info object
526  */
527 static int count_fastmap_pebs(struct ubi_attach_info *ai)
528 {
529 	struct ubi_ainf_peb *aeb;
530 	struct ubi_ainf_volume *av;
531 	struct rb_node *rb1, *rb2;
532 	int n = 0;
533 
534 	list_for_each_entry(aeb, &ai->erase, u.list)
535 		n++;
536 
537 	list_for_each_entry(aeb, &ai->free, u.list)
538 		n++;
539 
540 	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
541 		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
542 			n++;
543 
544 	return n;
545 }
546 
547 /**
548  * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
549  * @ubi: UBI device object
550  * @ai: UBI attach info object
551  * @fm: the fastmap to be attached
552  *
553  * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
554  * < 0 indicates an internal error.
555  */
556 static int ubi_attach_fastmap(struct ubi_device *ubi,
557 			      struct ubi_attach_info *ai,
558 			      struct ubi_fastmap_layout *fm)
559 {
560 	struct list_head used, free;
561 	struct ubi_ainf_volume *av;
562 	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
563 	struct ubi_fm_sb *fmsb;
564 	struct ubi_fm_hdr *fmhdr;
565 	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
566 	struct ubi_fm_ec *fmec;
567 	struct ubi_fm_volhdr *fmvhdr;
568 	struct ubi_fm_eba *fm_eba;
569 	int ret, i, j, pool_size, wl_pool_size;
570 	size_t fm_pos = 0, fm_size = ubi->fm_size;
571 	unsigned long long max_sqnum = 0;
572 	void *fm_raw = ubi->fm_buf;
573 
574 	INIT_LIST_HEAD(&used);
575 	INIT_LIST_HEAD(&free);
576 	ai->min_ec = UBI_MAX_ERASECOUNTER;
577 
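	/*
	 * Parse the fastmap image in fm_buf in the order it was written:
	 * super block, header, the two pools, the EC lists (free, used,
	 * scrub, erase) and finally one volume header plus EBA table per
	 * volume.
	 */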
578 	fmsb = (struct ubi_fm_sb *)(fm_raw);
579 	ai->max_sqnum = fmsb->sqnum;
580 	fm_pos += sizeof(struct ubi_fm_sb);
581 	if (fm_pos >= fm_size)
582 		goto fail_bad;
583 
584 	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
585 	fm_pos += sizeof(*fmhdr);
586 	if (fm_pos >= fm_size)
587 		goto fail_bad;
588 
589 	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
590 		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
591 			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
592 		goto fail_bad;
593 	}
594 
595 	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
596 	fm_pos += sizeof(*fmpl);
597 	if (fm_pos >= fm_size)
598 		goto fail_bad;
599 	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
600 		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
601 			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
602 		goto fail_bad;
603 	}
604 
605 	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
606 	fm_pos += sizeof(*fmpl_wl);
607 	if (fm_pos >= fm_size)
608 		goto fail_bad;
609 	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
610 		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
611 			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
612 		goto fail_bad;
613 	}
614 
615 	pool_size = be16_to_cpu(fmpl->size);
616 	wl_pool_size = be16_to_cpu(fmpl_wl->size);
617 	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
618 	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
619 
620 	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
621 		ubi_err(ubi, "bad pool size: %i", pool_size);
622 		goto fail_bad;
623 	}
624 
625 	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
626 		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
627 		goto fail_bad;
628 	}
629 
630 
631 	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
632 	    fm->max_pool_size < 0) {
633 		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
634 		goto fail_bad;
635 	}
636 
637 	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
638 	    fm->max_wl_pool_size < 0) {
639 		ubi_err(ubi, "bad maximal WL pool size: %i",
640 			fm->max_wl_pool_size);
641 		goto fail_bad;
642 	}
643 
644 	/* read EC values from free list */
645 	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
646 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
647 		fm_pos += sizeof(*fmec);
648 		if (fm_pos >= fm_size)
649 			goto fail_bad;
650 
651 		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
652 			be32_to_cpu(fmec->ec), 0);
653 	}
654 
655 	/* read EC values from used list */
656 	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
657 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
658 		fm_pos += sizeof(*fmec);
659 		if (fm_pos >= fm_size)
660 			goto fail_bad;
661 
662 		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
663 			be32_to_cpu(fmec->ec), 0);
664 	}
665 
666 	/* read EC values from scrub list */
667 	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
668 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
669 		fm_pos += sizeof(*fmec);
670 		if (fm_pos >= fm_size)
671 			goto fail_bad;
672 
673 		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
674 			be32_to_cpu(fmec->ec), 1);
675 	}
676 
677 	/* read EC values from erase list */
678 	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
679 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
680 		fm_pos += sizeof(*fmec);
681 		if (fm_pos >= fm_size)
682 			goto fail_bad;
683 
684 		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
685 			be32_to_cpu(fmec->ec), 1);
686 	}
687 
688 	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
689 	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
690 
691 	/* Iterate over all volumes and read their EBA table */
692 	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
693 		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
694 		fm_pos += sizeof(*fmvhdr);
695 		if (fm_pos >= fm_size)
696 			goto fail_bad;
697 
698 		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
699 			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
700 				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
701 			goto fail_bad;
702 		}
703 
704 		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
705 			     be32_to_cpu(fmvhdr->used_ebs),
706 			     be32_to_cpu(fmvhdr->data_pad),
707 			     fmvhdr->vol_type,
708 			     be32_to_cpu(fmvhdr->last_eb_bytes));
709 
710 		if (IS_ERR(av)) {
711 			if (PTR_ERR(av) == -EEXIST)
712 				ubi_err(ubi, "volume (ID %i) already exists",
713 					fmvhdr->vol_id);
714 
715 			goto fail_bad;
716 		}
717 
718 		ai->vols_found++;
719 		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
720 			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
721 
722 		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
723 		fm_pos += sizeof(*fm_eba);
724 		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
725 		if (fm_pos >= fm_size)
726 			goto fail_bad;
727 
728 		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
729 			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
730 				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
731 			goto fail_bad;
732 		}
733 
734 		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
735 			int pnum = be32_to_cpu(fm_eba->pnum[j]);
736 
737 			if (pnum < 0)
738 				continue;
739 
740 			aeb = NULL;
741 			list_for_each_entry(tmp_aeb, &used, u.list) {
742 				if (tmp_aeb->pnum == pnum) {
743 					aeb = tmp_aeb;
744 					break;
745 				}
746 			}
747 
748 			if (!aeb) {
749 				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
750 				goto fail_bad;
751 			}
752 
753 			aeb->lnum = j;
754 
755 			if (av->highest_lnum <= aeb->lnum)
756 				av->highest_lnum = aeb->lnum;
757 
758 			assign_aeb_to_av(ai, aeb, av);
759 
760 			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
761 				aeb->pnum, aeb->lnum, av->vol_id);
762 		}
763 	}
764 
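	/*
	 * The pool PEBs may have changed after the fastmap was written,
	 * so their current state has to be read from flash instead of
	 * being taken from the fastmap data above.
	 */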
765 	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
766 	if (ret)
767 		goto fail;
768 
769 	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
770 	if (ret)
771 		goto fail;
772 
773 	if (max_sqnum > ai->max_sqnum)
774 		ai->max_sqnum = max_sqnum;
775 
776 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
777 		list_move_tail(&tmp_aeb->u.list, &ai->free);
778 
779 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
780 		list_move_tail(&tmp_aeb->u.list, &ai->erase);
781 
782 	ubi_assert(list_empty(&free));
783 
784 	/*
785 	 * If fastmap is leaking PEBs (must not happen), raise a
786 	 * fat warning and fall back to scanning mode.
787 	 * We do this here because in ubi_wl_init() it's too late
788 	 * and we cannot fall back to scanning.
789 	 */
790 	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
791 		    ai->bad_peb_count - fm->used_blocks))
792 		goto fail_bad;
793 
794 	return 0;
795 
796 fail_bad:
797 	ret = UBI_BAD_FASTMAP;
798 fail:
799 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
800 		list_del(&tmp_aeb->u.list);
801 		ubi_free_aeb(ai, tmp_aeb);
802 	}
803 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
804 		list_del(&tmp_aeb->u.list);
805 		ubi_free_aeb(ai, tmp_aeb);
806 	}
807 
808 	return ret;
809 }
810 
811 /**
812  * find_fm_anchor - find the most recent Fastmap superblock (anchor)
813  * @ai: UBI attach info to be filled
814  */
815 static int find_fm_anchor(struct ubi_attach_info *ai)
816 {
817 	int ret = -1;
818 	struct ubi_ainf_peb *aeb;
819 	unsigned long long max_sqnum = 0;
820 
821 	list_for_each_entry(aeb, &ai->fastmap, u.list) {
822 		if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
823 			max_sqnum = aeb->sqnum;
824 			ret = aeb->pnum;
825 		}
826 	}
827 
828 	return ret;
829 }
830 
831 static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
832 				      struct ubi_ainf_peb *old)
833 {
834 	struct ubi_ainf_peb *new;
835 
836 	new = ubi_alloc_aeb(ai, old->pnum, old->ec);
837 	if (!new)
838 		return NULL;
839 
840 	new->vol_id = old->vol_id;
841 	new->sqnum = old->sqnum;
842 	new->lnum = old->lnum;
843 	new->scrub = old->scrub;
844 	new->copy_flag = old->copy_flag;
845 
846 	return new;
847 }
848 
849 /**
850  * ubi_scan_fastmap - scan the fastmap.
851  * @ubi: UBI device object
852  * @ai: UBI attach info to be filled
853  * @scan_ai: UBI attach info from the first 64 PEBs,
854  *           used to find the most recent Fastmap data structure
855  *
856  * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
857  * UBI_BAD_FASTMAP if one was found but is not usable.
858  * < 0 indicates an internal error.
859  */
860 int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
861 		     struct ubi_attach_info *scan_ai)
862 {
863 	struct ubi_fm_sb *fmsb, *fmsb2;
864 	struct ubi_vid_io_buf *vb;
865 	struct ubi_vid_hdr *vh;
866 	struct ubi_ec_hdr *ech;
867 	struct ubi_fastmap_layout *fm;
868 	struct ubi_ainf_peb *aeb;
869 	int i, used_blocks, pnum, fm_anchor, ret = 0;
870 	size_t fm_size;
871 	__be32 crc, tmp_crc;
872 	unsigned long long sqnum = 0;
873 
874 	fm_anchor = find_fm_anchor(scan_ai);
875 	if (fm_anchor < 0)
876 		return UBI_NO_FASTMAP;
877 
878 	/* Copy all (possible) fastmap blocks into our new attach structure. */
879 	list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
880 		struct ubi_ainf_peb *new;
881 
882 		new = clone_aeb(ai, aeb);
883 		if (!new)
884 			return -ENOMEM;
885 
886 		list_add(&new->u.list, &ai->fastmap);
887 	}
888 
889 	down_write(&ubi->fm_protect);
890 	memset(ubi->fm_buf, 0, ubi->fm_size);
891 
892 	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
893 	if (!fmsb) {
894 		ret = -ENOMEM;
895 		goto out;
896 	}
897 
898 	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
899 	if (!fm) {
900 		ret = -ENOMEM;
901 		kfree(fmsb);
902 		goto out;
903 	}
904 
905 	ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
906 	if (ret && ret != UBI_IO_BITFLIPS)
907 		goto free_fm_sb;
908 	else if (ret == UBI_IO_BITFLIPS)
909 		fm->to_be_tortured[0] = 1;
910 
911 	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
912 		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
913 			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
914 		ret = UBI_BAD_FASTMAP;
915 		goto free_fm_sb;
916 	}
917 
918 	if (fmsb->version != UBI_FM_FMT_VERSION) {
919 		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
920 			fmsb->version, UBI_FM_FMT_VERSION);
921 		ret = UBI_BAD_FASTMAP;
922 		goto free_fm_sb;
923 	}
924 
925 	used_blocks = be32_to_cpu(fmsb->used_blocks);
926 	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
927 		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
928 			used_blocks);
929 		ret = UBI_BAD_FASTMAP;
930 		goto free_fm_sb;
931 	}
932 
933 	fm_size = ubi->leb_size * used_blocks;
934 	if (fm_size != ubi->fm_size) {
935 		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
936 			fm_size, ubi->fm_size);
937 		ret = UBI_BAD_FASTMAP;
938 		goto free_fm_sb;
939 	}
940 
941 	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
942 	if (!ech) {
943 		ret = -ENOMEM;
944 		goto free_fm_sb;
945 	}
946 
947 	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
948 	if (!vb) {
949 		ret = -ENOMEM;
950 		goto free_hdr;
951 	}
952 
953 	vh = ubi_get_vid_hdr(vb);
954 
955 	for (i = 0; i < used_blocks; i++) {
956 		int image_seq;
957 
958 		pnum = be32_to_cpu(fmsb->block_loc[i]);
959 
960 		if (ubi_io_is_bad(ubi, pnum)) {
961 			ret = UBI_BAD_FASTMAP;
962 			goto free_hdr;
963 		}
964 
965 		if (i == 0 && pnum != fm_anchor) {
966 			ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
967 				pnum, fm_anchor);
968 			ret = UBI_BAD_FASTMAP;
969 			goto free_hdr;
970 		}
971 
972 		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
973 		if (ret && ret != UBI_IO_BITFLIPS) {
974 			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
975 				i, pnum);
976 			if (ret > 0)
977 				ret = UBI_BAD_FASTMAP;
978 			goto free_hdr;
979 		} else if (ret == UBI_IO_BITFLIPS)
980 			fm->to_be_tortured[i] = 1;
981 
982 		image_seq = be32_to_cpu(ech->image_seq);
983 		if (!ubi->image_seq)
984 			ubi->image_seq = image_seq;
985 
986 		/*
987 		 * Older UBI implementations have image_seq set to zero, so
988 		 * we shouldn't fail if image_seq == 0.
989 		 */
990 		if (image_seq && (image_seq != ubi->image_seq)) {
991 			ubi_err(ubi, "wrong image seq:%d instead of %d",
992 				be32_to_cpu(ech->image_seq), ubi->image_seq);
993 			ret = UBI_BAD_FASTMAP;
994 			goto free_hdr;
995 		}
996 
997 		ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
998 		if (ret && ret != UBI_IO_BITFLIPS) {
999 			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
1000 				i, pnum);
1001 			goto free_hdr;
1002 		}
1003 
1004 		if (i == 0) {
1005 			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
1006 				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
1007 					be32_to_cpu(vh->vol_id),
1008 					UBI_FM_SB_VOLUME_ID);
1009 				ret = UBI_BAD_FASTMAP;
1010 				goto free_hdr;
1011 			}
1012 		} else {
1013 			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
1014 				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
1015 					be32_to_cpu(vh->vol_id),
1016 					UBI_FM_DATA_VOLUME_ID);
1017 				ret = UBI_BAD_FASTMAP;
1018 				goto free_hdr;
1019 			}
1020 		}
1021 
1022 		if (sqnum < be64_to_cpu(vh->sqnum))
1023 			sqnum = be64_to_cpu(vh->sqnum);
1024 
1025 		ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
1026 				       pnum, 0, ubi->leb_size);
1027 		if (ret && ret != UBI_IO_BITFLIPS) {
1028 			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
1029 				"err: %i)", i, pnum, ret);
1030 			goto free_hdr;
1031 		}
1032 	}
1033 
1034 	kfree(fmsb);
1035 	fmsb = NULL;
1036 
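	/*
	 * The stored data CRC covers the whole fastmap image with the
	 * data_crc field itself set to zero, so clear it before
	 * recomputing the checksum.
	 */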
1037 	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
1038 	tmp_crc = be32_to_cpu(fmsb2->data_crc);
1039 	fmsb2->data_crc = 0;
1040 	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
1041 	if (crc != tmp_crc) {
1042 		ubi_err(ubi, "fastmap data CRC is invalid");
1043 		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
1044 			tmp_crc, crc);
1045 		ret = UBI_BAD_FASTMAP;
1046 		goto free_hdr;
1047 	}
1048 
1049 	fmsb2->sqnum = sqnum;
1050 
1051 	fm->used_blocks = used_blocks;
1052 
1053 	ret = ubi_attach_fastmap(ubi, ai, fm);
1054 	if (ret) {
1055 		if (ret > 0)
1056 			ret = UBI_BAD_FASTMAP;
1057 		goto free_hdr;
1058 	}
1059 
1060 	for (i = 0; i < used_blocks; i++) {
1061 		struct ubi_wl_entry *e;
1062 
1063 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1064 		if (!e) {
1065 			while (i--)
1066 				kfree(fm->e[i]);
1067 
1068 			ret = -ENOMEM;
1069 			goto free_hdr;
1070 		}
1071 
1072 		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1073 		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1074 		fm->e[i] = e;
1075 	}
1076 
1077 	ubi->fm = fm;
1078 	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1079 	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1080 	ubi_msg(ubi, "attached by fastmap");
1081 	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
1082 	ubi_msg(ubi, "fastmap WL pool size: %d",
1083 		ubi->fm_wl_pool.max_size);
1084 	ubi->fm_disabled = 0;
1085 	ubi->fast_attach = 1;
1086 
1087 	ubi_free_vid_buf(vb);
1088 	kfree(ech);
1089 out:
1090 	up_write(&ubi->fm_protect);
1091 	if (ret == UBI_BAD_FASTMAP)
1092 		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
1093 	return ret;
1094 
1095 free_hdr:
1096 	ubi_free_vid_buf(vb);
1097 	kfree(ech);
1098 free_fm_sb:
1099 	kfree(fmsb);
1100 	kfree(fm);
1101 	goto out;
1102 }
1103 
1104 /**
1105  * ubi_write_fastmap - writes a fastmap.
1106  * @ubi: UBI device object
1107  * @new_fm: the fastmap to be written
1108  *
1109  * Returns 0 on success, < 0 indicates an internal error.
1110  */
1111 static int ubi_write_fastmap(struct ubi_device *ubi,
1112 			     struct ubi_fastmap_layout *new_fm)
1113 {
1114 	size_t fm_pos = 0;
1115 	void *fm_raw;
1116 	struct ubi_fm_sb *fmsb;
1117 	struct ubi_fm_hdr *fmh;
1118 	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
1119 	struct ubi_fm_ec *fec;
1120 	struct ubi_fm_volhdr *fvh;
1121 	struct ubi_fm_eba *feba;
1122 	struct ubi_wl_entry *wl_e;
1123 	struct ubi_volume *vol;
1124 	struct ubi_vid_io_buf *avbuf, *dvbuf;
1125 	struct ubi_vid_hdr *avhdr, *dvhdr;
1126 	struct ubi_work *ubi_wrk;
1127 	struct rb_node *tmp_rb;
1128 	int ret, i, j, free_peb_count, used_peb_count, vol_count;
1129 	int scrub_peb_count, erase_peb_count;
1130 	unsigned long *seen_pebs = NULL;
1131 
1132 	fm_raw = ubi->fm_buf;
1133 	memset(ubi->fm_buf, 0, ubi->fm_size);
1134 
1135 	avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1136 	if (!avbuf) {
1137 		ret = -ENOMEM;
1138 		goto out;
1139 	}
1140 
1141 	dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
1142 	if (!dvbuf) {
1143 		ret = -ENOMEM;
1144 		goto out_kfree;
1145 	}
1146 
1147 	avhdr = ubi_get_vid_hdr(avbuf);
1148 	dvhdr = ubi_get_vid_hdr(dvbuf);
1149 
1150 	seen_pebs = init_seen(ubi);
1151 	if (IS_ERR(seen_pebs)) {
1152 		ret = PTR_ERR(seen_pebs);
1153 		goto out_kfree;
1154 	}
1155 
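	/*
	 * Hold volumes_lock and wl_lock while the volume and wear-leveling
	 * state is serialized into fm_buf so it cannot change underneath us.
	 */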
1156 	spin_lock(&ubi->volumes_lock);
1157 	spin_lock(&ubi->wl_lock);
1158 
1159 	fmsb = (struct ubi_fm_sb *)fm_raw;
1160 	fm_pos += sizeof(*fmsb);
1161 	ubi_assert(fm_pos <= ubi->fm_size);
1162 
1163 	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1164 	fm_pos += sizeof(*fmh);
1165 	ubi_assert(fm_pos <= ubi->fm_size);
1166 
1167 	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1168 	fmsb->version = UBI_FM_FMT_VERSION;
1169 	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1170 	/* the max sqnum will be filled in while *reading* the fastmap */
1171 	fmsb->sqnum = 0;
1172 
1173 	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1174 	free_peb_count = 0;
1175 	used_peb_count = 0;
1176 	scrub_peb_count = 0;
1177 	erase_peb_count = 0;
1178 	vol_count = 0;
1179 
1180 	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1181 	fm_pos += sizeof(*fmpl);
1182 	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1183 	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
1184 	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1185 
1186 	for (i = 0; i < ubi->fm_pool.size; i++) {
1187 		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1188 		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
1189 	}
1190 
1191 	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1192 	fm_pos += sizeof(*fmpl_wl);
1193 	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1194 	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
1195 	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1196 
1197 	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
1198 		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1199 		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
1200 	}
1201 
1202 	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
1203 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1204 
1205 		fec->pnum = cpu_to_be32(wl_e->pnum);
1206 		set_seen(ubi, wl_e->pnum, seen_pebs);
1207 		fec->ec = cpu_to_be32(wl_e->ec);
1208 
1209 		free_peb_count++;
1210 		fm_pos += sizeof(*fec);
1211 		ubi_assert(fm_pos <= ubi->fm_size);
1212 	}
1213 	fmh->free_peb_count = cpu_to_be32(free_peb_count);
1214 
1215 	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
1216 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1217 
1218 		fec->pnum = cpu_to_be32(wl_e->pnum);
1219 		set_seen(ubi, wl_e->pnum, seen_pebs);
1220 		fec->ec = cpu_to_be32(wl_e->ec);
1221 
1222 		used_peb_count++;
1223 		fm_pos += sizeof(*fec);
1224 		ubi_assert(fm_pos <= ubi->fm_size);
1225 	}
1226 
1227 	ubi_for_each_protected_peb(ubi, i, wl_e) {
1228 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1229 
1230 		fec->pnum = cpu_to_be32(wl_e->pnum);
1231 		set_seen(ubi, wl_e->pnum, seen_pebs);
1232 		fec->ec = cpu_to_be32(wl_e->ec);
1233 
1234 		used_peb_count++;
1235 		fm_pos += sizeof(*fec);
1236 		ubi_assert(fm_pos <= ubi->fm_size);
1237 	}
1238 	fmh->used_peb_count = cpu_to_be32(used_peb_count);
1239 
1240 	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
1241 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1242 
1243 		fec->pnum = cpu_to_be32(wl_e->pnum);
1244 		set_seen(ubi, wl_e->pnum, seen_pebs);
1245 		fec->ec = cpu_to_be32(wl_e->ec);
1246 
1247 		scrub_peb_count++;
1248 		fm_pos += sizeof(*fec);
1249 		ubi_assert(fm_pos <= ubi->fm_size);
1250 	}
1251 	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1252 
1253 
1254 	list_for_each_entry(ubi_wrk, &ubi->works, list) {
1255 		if (ubi_is_erase_work(ubi_wrk)) {
1256 			wl_e = ubi_wrk->e;
1257 			ubi_assert(wl_e);
1258 
1259 			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1260 
1261 			fec->pnum = cpu_to_be32(wl_e->pnum);
1262 			set_seen(ubi, wl_e->pnum, seen_pebs);
1263 			fec->ec = cpu_to_be32(wl_e->ec);
1264 
1265 			erase_peb_count++;
1266 			fm_pos += sizeof(*fec);
1267 			ubi_assert(fm_pos <= ubi->fm_size);
1268 		}
1269 	}
1270 	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1271 
1272 	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1273 		vol = ubi->volumes[i];
1274 
1275 		if (!vol)
1276 			continue;
1277 
1278 		vol_count++;
1279 
1280 		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1281 		fm_pos += sizeof(*fvh);
1282 		ubi_assert(fm_pos <= ubi->fm_size);
1283 
1284 		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1285 		fvh->vol_id = cpu_to_be32(vol->vol_id);
1286 		fvh->vol_type = vol->vol_type;
1287 		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1288 		fvh->data_pad = cpu_to_be32(vol->data_pad);
1289 		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1290 
1291 		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1292 			vol->vol_type == UBI_STATIC_VOLUME);
1293 
1294 		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1295 		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1296 		ubi_assert(fm_pos <= ubi->fm_size);
1297 
1298 		for (j = 0; j < vol->reserved_pebs; j++) {
1299 			struct ubi_eba_leb_desc ldesc;
1300 
1301 			ubi_eba_get_ldesc(vol, j, &ldesc);
1302 			feba->pnum[j] = cpu_to_be32(ldesc.pnum);
1303 		}
1304 
1305 		feba->reserved_pebs = cpu_to_be32(j);
1306 		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1307 	}
1308 	fmh->vol_count = cpu_to_be32(vol_count);
1309 	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1310 
1311 	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1312 	avhdr->lnum = 0;
1313 
1314 	spin_unlock(&ubi->wl_lock);
1315 	spin_unlock(&ubi->volumes_lock);
1316 
1317 	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1318 	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
1319 	if (ret) {
1320 		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
1321 		goto out_kfree;
1322 	}
1323 
1324 	for (i = 0; i < new_fm->used_blocks; i++) {
1325 		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1326 		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
1327 		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1328 	}
1329 
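	/*
	 * Compute the CRC only after block_loc/block_ec have been filled
	 * in; it covers the complete fastmap image with data_crc itself
	 * zeroed.
	 */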
1330 	fmsb->data_crc = 0;
1331 	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1332 					   ubi->fm_size));
1333 
1334 	for (i = 1; i < new_fm->used_blocks; i++) {
1335 		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1336 		dvhdr->lnum = cpu_to_be32(i);
1337 		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1338 			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1339 		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
1340 		if (ret) {
1341 			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
1342 				new_fm->e[i]->pnum);
1343 			goto out_kfree;
1344 		}
1345 	}
1346 
1347 	for (i = 0; i < new_fm->used_blocks; i++) {
1348 		ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
1349 					new_fm->e[i]->pnum, 0, ubi->leb_size);
1350 		if (ret) {
1351 			ubi_err(ubi, "unable to write fastmap to PEB %i!",
1352 				new_fm->e[i]->pnum);
1353 			goto out_kfree;
1354 		}
1355 	}
1356 
1357 	ubi_assert(new_fm);
1358 	ubi->fm = new_fm;
1359 
1360 	ret = self_check_seen(ubi, seen_pebs);
1361 	dbg_bld("fastmap written!");
1362 
1363 out_kfree:
1364 	ubi_free_vid_buf(avbuf);
1365 	ubi_free_vid_buf(dvbuf);
1366 	free_seen(seen_pebs);
1367 out:
1368 	return ret;
1369 }
1370 
1371 /**
1372  * erase_block - Manually erase a PEB.
1373  * @ubi: UBI device object
1374  * @pnum: PEB to be erased
1375  *
1376  * Returns the new EC value on success, < 0 indicates an internal error.
1377  */
1378 static int erase_block(struct ubi_device *ubi, int pnum)
1379 {
1380 	int ret;
1381 	struct ubi_ec_hdr *ec_hdr;
1382 	long long ec;
1383 
1384 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1385 	if (!ec_hdr)
1386 		return -ENOMEM;
1387 
1388 	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1389 	if (ret < 0)
1390 		goto out;
1391 	else if (ret && ret != UBI_IO_BITFLIPS) {
1392 		ret = -EINVAL;
1393 		goto out;
1394 	}
1395 
1396 	ret = ubi_io_sync_erase(ubi, pnum, 0);
1397 	if (ret < 0)
1398 		goto out;
1399 
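	/*
	 * ubi_io_sync_erase() returns the number of erase operations
	 * performed on success; add it to the stored erase counter.
	 */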
1400 	ec = be64_to_cpu(ec_hdr->ec);
1401 	ec += ret;
1402 	if (ec > UBI_MAX_ERASECOUNTER) {
1403 		ret = -EINVAL;
1404 		goto out;
1405 	}
1406 
1407 	ec_hdr->ec = cpu_to_be64(ec);
1408 	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1409 	if (ret < 0)
1410 		goto out;
1411 
1412 	ret = ec;
1413 out:
1414 	kfree(ec_hdr);
1415 	return ret;
1416 }
1417 
1418 /**
1419  * invalidate_fastmap - destroys a fastmap.
1420  * @ubi: UBI device object
1421  *
1422  * This function ensures that upon next UBI attach a full scan
1423  * is issued. We need this if UBI is about to write a new fastmap
1424  * but is unable to do so. In this case we have two options:
1425  * a) Make sure that the current fastmap will not be used at
1426  * attach time and continue or b) fall back to RO mode to have the
1427  * current fastmap in a valid state.
1428  * Returns 0 on success, < 0 indicates an internal error.
1429  */
1430 static int invalidate_fastmap(struct ubi_device *ubi)
1431 {
1432 	int ret;
1433 	struct ubi_fastmap_layout *fm;
1434 	struct ubi_wl_entry *e;
1435 	struct ubi_vid_io_buf *vb = NULL;
1436 	struct ubi_vid_hdr *vh;
1437 
1438 	if (!ubi->fm)
1439 		return 0;
1440 
1441 	ubi->fm = NULL;
1442 
1443 	ret = -ENOMEM;
1444 	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
1445 	if (!fm)
1446 		goto out;
1447 
1448 	vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1449 	if (!vb)
1450 		goto out_free_fm;
1451 
1452 	vh = ubi_get_vid_hdr(vb);
1453 
1454 	ret = -ENOSPC;
1455 	e = ubi_wl_get_fm_peb(ubi, 1);
1456 	if (!e)
1457 		goto out_free_fm;
1458 
1459 	/*
1460 	 * Create fake fastmap such that UBI will fall back
1461 	 * to scanning mode.
1462 	 */
1463 	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1464 	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
1465 	if (ret < 0) {
1466 		ubi_wl_put_fm_peb(ubi, e, 0, 0);
1467 		goto out_free_fm;
1468 	}
1469 
1470 	fm->used_blocks = 1;
1471 	fm->e[0] = e;
1472 
1473 	ubi->fm = fm;
1474 
1475 out:
1476 	ubi_free_vid_buf(vb);
1477 	return ret;
1478 
1479 out_free_fm:
1480 	kfree(fm);
1481 	goto out;
1482 }
1483 
1484 /**
1485  * return_fm_pebs - returns all PEBs used by a fastmap back to the
1486  * WL sub-system.
1487  * @ubi: UBI device object
1488  * @fm: fastmap layout object
1489  */
1490 static void return_fm_pebs(struct ubi_device *ubi,
1491 			   struct ubi_fastmap_layout *fm)
1492 {
1493 	int i;
1494 
1495 	if (!fm)
1496 		return;
1497 
1498 	for (i = 0; i < fm->used_blocks; i++) {
1499 		if (fm->e[i]) {
1500 			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
1501 					  fm->to_be_tortured[i]);
1502 			fm->e[i] = NULL;
1503 		}
1504 	}
1505 }
1506 
1507 /**
1508  * ubi_update_fastmap - will be called by UBI if a volume changes or
1509  * a fastmap pool becomes full.
1510  * @ubi: UBI device object
1511  *
1512  * Returns 0 on success, < 0 indicates an internal error.
1513  */
1514 int ubi_update_fastmap(struct ubi_device *ubi)
1515 {
1516 	int ret, i, j;
1517 	struct ubi_fastmap_layout *new_fm, *old_fm;
1518 	struct ubi_wl_entry *tmp_e;
1519 
1520 	down_write(&ubi->fm_protect);
1521 	down_write(&ubi->work_sem);
1522 	down_write(&ubi->fm_eba_sem);
1523 
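	/*
	 * Refill the WL pools first so that the new fastmap describes
	 * freshly filled pools.
	 */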
1524 	ubi_refill_pools(ubi);
1525 
1526 	if (ubi->ro_mode || ubi->fm_disabled) {
1527 		up_write(&ubi->fm_eba_sem);
1528 		up_write(&ubi->work_sem);
1529 		up_write(&ubi->fm_protect);
1530 		return 0;
1531 	}
1532 
1533 	ret = ubi_ensure_anchor_pebs(ubi);
1534 	if (ret) {
1535 		up_write(&ubi->fm_eba_sem);
1536 		up_write(&ubi->work_sem);
1537 		up_write(&ubi->fm_protect);
1538 		return ret;
1539 	}
1540 
1541 	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1542 	if (!new_fm) {
1543 		up_write(&ubi->fm_eba_sem);
1544 		up_write(&ubi->work_sem);
1545 		up_write(&ubi->fm_protect);
1546 		return -ENOMEM;
1547 	}
1548 
1549 	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1550 	old_fm = ubi->fm;
1551 	ubi->fm = NULL;
1552 
1553 	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1554 		ubi_err(ubi, "fastmap too large");
1555 		ret = -ENOSPC;
1556 		goto err;
1557 	}
1558 
1559 	for (i = 1; i < new_fm->used_blocks; i++) {
1560 		spin_lock(&ubi->wl_lock);
1561 		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1562 		spin_unlock(&ubi->wl_lock);
1563 
1564 		if (!tmp_e) {
1565 			if (old_fm && old_fm->e[i]) {
1566 				ret = erase_block(ubi, old_fm->e[i]->pnum);
1567 				if (ret < 0) {
1568 					ubi_err(ubi, "could not erase old fastmap PEB");
1569 
1570 					for (j = 1; j < i; j++) {
1571 						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1572 								  j, 0);
1573 						new_fm->e[j] = NULL;
1574 					}
1575 					goto err;
1576 				}
1577 				new_fm->e[i] = old_fm->e[i];
1578 				old_fm->e[i] = NULL;
1579 			} else {
1580 				ubi_err(ubi, "could not get any free erase block");
1581 
1582 				for (j = 1; j < i; j++) {
1583 					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1584 					new_fm->e[j] = NULL;
1585 				}
1586 
1587 				ret = -ENOSPC;
1588 				goto err;
1589 			}
1590 		} else {
1591 			new_fm->e[i] = tmp_e;
1592 
1593 			if (old_fm && old_fm->e[i]) {
1594 				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1595 						  old_fm->to_be_tortured[i]);
1596 				old_fm->e[i] = NULL;
1597 			}
1598 		}
1599 	}
1600 
1601 	/* Old fastmap is larger than the new one */
1602 	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
1603 		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
1604 			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1605 					  old_fm->to_be_tortured[i]);
1606 			old_fm->e[i] = NULL;
1607 		}
1608 	}
1609 
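	/*
	 * Get a fresh anchor PEB for the fastmap super block. If none is
	 * available, the old anchor PEB is erased and reused below; without
	 * an old fastmap this is a fatal condition.
	 */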
1610 	spin_lock(&ubi->wl_lock);
1611 	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
1612 	spin_unlock(&ubi->wl_lock);
1613 
1614 	if (old_fm) {
1615 		/* no fresh anchor PEB was found, reuse the old one */
1616 		if (!tmp_e) {
1617 			ret = erase_block(ubi, old_fm->e[0]->pnum);
1618 			if (ret < 0) {
1619 				ubi_err(ubi, "could not erase old anchor PEB");
1620 
1621 				for (i = 1; i < new_fm->used_blocks; i++) {
1622 					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1623 							  i, 0);
1624 					new_fm->e[i] = NULL;
1625 				}
1626 				goto err;
1627 			}
1628 			new_fm->e[0] = old_fm->e[0];
1629 			new_fm->e[0]->ec = ret;
1630 			old_fm->e[0] = NULL;
1631 		} else {
1632 			/* we've got a new anchor PEB, return the old one */
1633 			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1634 					  old_fm->to_be_tortured[0]);
1635 			new_fm->e[0] = tmp_e;
1636 			old_fm->e[0] = NULL;
1637 		}
1638 	} else {
1639 		if (!tmp_e) {
1640 			ubi_err(ubi, "could not find any anchor PEB");
1641 
1642 			for (i = 1; i < new_fm->used_blocks; i++) {
1643 				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1644 				new_fm->e[i] = NULL;
1645 			}
1646 
1647 			ret = -ENOSPC;
1648 			goto err;
1649 		}
1650 		new_fm->e[0] = tmp_e;
1651 	}
1652 
1653 	ret = ubi_write_fastmap(ubi, new_fm);
1654 
1655 	if (ret)
1656 		goto err;
1657 
1658 out_unlock:
1659 	up_write(&ubi->fm_eba_sem);
1660 	up_write(&ubi->work_sem);
1661 	up_write(&ubi->fm_protect);
1662 	kfree(old_fm);
1663 	return ret;
1664 
1665 err:
1666 	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
1667 
1668 	ret = invalidate_fastmap(ubi);
1669 	if (ret < 0) {
1670 		ubi_err(ubi, "Unable to invalidate current fastmap!");
1671 		ubi_ro_mode(ubi);
1672 	} else {
1673 		return_fm_pebs(ubi, old_fm);
1674 		return_fm_pebs(ubi, new_fm);
1675 		ret = 0;
1676 	}
1677 
1678 	kfree(new_fm);
1679 	goto out_unlock;
1680 }
1681