xref: /openbmc/linux/drivers/mtd/ubi/fastmap.c (revision 2eb0f624b709e78ec8e2f4c3412947703db99301)
1 /*
2  * Copyright (c) 2012 Linutronix GmbH
3  * Copyright (c) 2014 sigma star gmbh
4  * Author: Richard Weinberger <richard@nod.at>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; version 2.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13  * the GNU General Public License for more details.
14  *
15  */
16 
17 #include <linux/crc32.h>
18 #include <linux/bitmap.h>
19 #include "ubi.h"
20 
21 /**
22  * init_seen - allocate the bitmap used for fastmap debugging.
23  * @ubi: UBI device description object
24  */
25 static inline unsigned long *init_seen(struct ubi_device *ubi)
26 {
27 	unsigned long *ret;
28 
29 	if (!ubi_dbg_chk_fastmap(ubi))
30 		return NULL;
31 
32 	ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
33 		      GFP_KERNEL);
34 	if (!ret)
35 		return ERR_PTR(-ENOMEM);
36 
37 	return ret;
38 }
39 
40 /**
41  * free_seen - free the seen logic bitmap.
42  * @seen: bitmap with @ubi->peb_count bits
43  */
44 static inline void free_seen(unsigned long *seen)
45 {
46 	kfree(seen);
47 }
48 
49 /**
50  * set_seen - mark a PEB as seen.
51  * @ubi: UBI device description object
52  * @pnum: The PEB to be marked as seen
53  * @seen: bitmap with @ubi->peb_count bits
54  */
55 static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
56 {
57 	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
58 		return;
59 
60 	set_bit(pnum, seen);
61 }
62 
63 /**
64  * self_check_seen - check whether all PEBs have been seen by fastmap.
65  * @ubi: UBI device description object
66  * @seen: bitmap with @ubi->peb_count bits
67  */
68 static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
69 {
70 	int pnum, ret = 0;
71 
72 	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
73 		return 0;
74 
75 	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
76 		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
77 			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
78 			ret = -EINVAL;
79 		}
80 	}
81 
82 	return ret;
83 }
84 
85 /**
86  * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
87  * @ubi: UBI device description object
88  */
89 size_t ubi_calc_fm_size(struct ubi_device *ubi)
90 {
91 	size_t size;
92 
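	/*
	 * On-flash fastmap layout, in write order: super block, header,
	 * the two scan pools (pool and WL pool), one EC entry per PEB,
	 * a worst-case bound for the EBA tables (one struct ubi_fm_eba
	 * plus one __be32 per PEB) and one volume header per possible
	 * volume. The result is rounded up to full LEBs because every
	 * fastmap block occupies a whole LEB.
	 */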
93 	size = sizeof(struct ubi_fm_sb) +
94 		sizeof(struct ubi_fm_hdr) +
95 		sizeof(struct ubi_fm_scan_pool) +
96 		sizeof(struct ubi_fm_scan_pool) +
97 		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
98 		(sizeof(struct ubi_fm_eba) +
99 		(ubi->peb_count * sizeof(__be32))) +
100 		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
101 	return roundup(size, ubi->leb_size);
102 }
103 
104 
105 /**
106  * new_fm_vbuf - allocate a new VID buffer for fastmap usage.
107  * @ubi: UBI device description object
108  * @vol_id: the VID of the new header
109  *
110  * Returns a new struct ubi_vid_io_buf on success.
111  * NULL indicates out of memory.
112  */
113 static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
114 {
115 	struct ubi_vid_io_buf *new;
116 	struct ubi_vid_hdr *vh;
117 
118 	new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
119 	if (!new)
120 		goto out;
121 
122 	vh = ubi_get_vid_hdr(new);
123 	vh->vol_type = UBI_VID_DYNAMIC;
124 	vh->vol_id = cpu_to_be32(vol_id);
125 
126 	/* UBI implementations without fastmap support have to delete the
127 	 * fastmap.
128 	 */
129 	vh->compat = UBI_COMPAT_DELETE;
130 
131 out:
132 	return new;
133 }
134 
135 /**
136  * add_aeb - create and add an attach erase block to a given list.
137  * @ai: UBI attach info object
138  * @list: the target list
139  * @pnum: PEB number of the new attach erase block
140  * @ec: erase counter of the new LEB
141  * @scrub: scrub this PEB after attaching
142  *
143  * Returns 0 on success, < 0 indicates an internal error.
144  */
145 static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
146 		   int pnum, int ec, int scrub)
147 {
148 	struct ubi_ainf_peb *aeb;
149 
150 	aeb = ubi_alloc_aeb(ai, pnum, ec);
151 	if (!aeb)
152 		return -ENOMEM;
153 
154 	aeb->lnum = -1;
155 	aeb->scrub = scrub;
156 	aeb->copy_flag = aeb->sqnum = 0;
157 
158 	ai->ec_sum += aeb->ec;
159 	ai->ec_count++;
160 
161 	if (ai->max_ec < aeb->ec)
162 		ai->max_ec = aeb->ec;
163 
164 	if (ai->min_ec > aeb->ec)
165 		ai->min_ec = aeb->ec;
166 
167 	list_add_tail(&aeb->u.list, list);
168 
169 	return 0;
170 }
171 
172 /**
173  * add_vol - create and add a new volume to ubi_attach_info.
174  * @ai: ubi_attach_info object
175  * @vol_id: VID of the new volume
176  * @used_ebs: number of used EBs
177  * @data_pad: data padding value of the new volume
178  * @vol_type: volume type
179  * @last_eb_bytes: number of bytes in the last LEB
180  *
181  * Returns the new struct ubi_ainf_volume on success.
182  * NULL indicates an error.
183  */
184 static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
185 				       int used_ebs, int data_pad, u8 vol_type,
186 				       int last_eb_bytes)
187 {
188 	struct ubi_ainf_volume *av;
189 
190 	av = ubi_add_av(ai, vol_id);
191 	if (IS_ERR(av))
192 		return av;
193 
194 	av->data_pad = data_pad;
195 	av->last_data_size = last_eb_bytes;
196 	av->compat = 0;
197 	av->vol_type = vol_type;
198 	if (av->vol_type == UBI_STATIC_VOLUME)
199 		av->used_ebs = used_ebs;
200 
201 	dbg_bld("found volume (ID %i)", vol_id);
202 	return av;
203 }
204 
205 /**
206  * assign_aeb_to_av - assigns an AEB to a given ainf_volume and removes it
207  * from its original list.
208  * @ai: ubi_attach_info object
209  * @aeb: the AEB to be assigned
210  * @av: target scan volume
211  */
212 static void assign_aeb_to_av(struct ubi_attach_info *ai,
213 			     struct ubi_ainf_peb *aeb,
214 			     struct ubi_ainf_volume *av)
215 {
216 	struct ubi_ainf_peb *tmp_aeb;
217 	struct rb_node **p = &av->root.rb_node, *parent = NULL;
218 
219 	while (*p) {
220 		parent = *p;
221 
222 		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
223 		if (aeb->lnum != tmp_aeb->lnum) {
224 			if (aeb->lnum < tmp_aeb->lnum)
225 				p = &(*p)->rb_left;
226 			else
227 				p = &(*p)->rb_right;
228 
229 			continue;
230 		} else
231 			break;
232 	}
233 
234 	list_del(&aeb->u.list);
235 	av->leb_count++;
236 
237 	rb_link_node(&aeb->u.rb, parent, p);
238 	rb_insert_color(&aeb->u.rb, &av->root);
239 }
240 
241 /**
242  * update_vol - inserts or updates a LEB which was found in a pool.
243  * @ubi: the UBI device object
244  * @ai: attach info object
245  * @av: the volume this LEB belongs to
246  * @new_vh: the volume header derived from new_aeb
247  * @new_aeb: the AEB to be examined
248  *
249  * Returns 0 on success, < 0 indicates an internal error.
250  */
251 static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
252 		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
253 		      struct ubi_ainf_peb *new_aeb)
254 {
255 	struct rb_node **p = &av->root.rb_node, *parent = NULL;
256 	struct ubi_ainf_peb *aeb, *victim;
257 	int cmp_res;
258 
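	/*
	 * Walk the volume's RB-tree, which is keyed by LEB number. If an
	 * entry with the same lnum already exists, ubi_compare_lebs()
	 * decides below which PEB carries the newer copy; otherwise the
	 * loop terminates with @p pointing to the insertion slot for the
	 * new LEB.
	 */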
259 	while (*p) {
260 		parent = *p;
261 		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
262 
263 		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
264 			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
265 				p = &(*p)->rb_left;
266 			else
267 				p = &(*p)->rb_right;
268 
269 			continue;
270 		}
271 
272 		/* This case can happen if the fastmap gets written
273 		 * because of a volume change (creation, deletion, ..).
274 		 * Then a PEB can be within the persistent EBA and the pool.
275 		 */
276 		if (aeb->pnum == new_aeb->pnum) {
277 			ubi_assert(aeb->lnum == new_aeb->lnum);
278 			ubi_free_aeb(ai, new_aeb);
279 
280 			return 0;
281 		}
282 
283 		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
284 		if (cmp_res < 0)
285 			return cmp_res;
286 
287 		/* new_aeb is newer */
288 		if (cmp_res & 1) {
289 			victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
290 			if (!victim)
291 				return -ENOMEM;
292 
293 			list_add_tail(&victim->u.list, &ai->erase);
294 
295 			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
296 				av->last_data_size =
297 					be32_to_cpu(new_vh->data_size);
298 
299 			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
300 				av->vol_id, aeb->lnum, new_aeb->pnum);
301 
302 			aeb->ec = new_aeb->ec;
303 			aeb->pnum = new_aeb->pnum;
304 			aeb->copy_flag = new_vh->copy_flag;
305 			aeb->scrub = new_aeb->scrub;
306 			aeb->sqnum = new_aeb->sqnum;
307 			ubi_free_aeb(ai, new_aeb);
308 
309 		/* new_aeb is older */
310 		} else {
311 			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
312 				av->vol_id, aeb->lnum, new_aeb->pnum);
313 			list_add_tail(&new_aeb->u.list, &ai->erase);
314 		}
315 
316 		return 0;
317 	}
318 	/* This LEB is new, let's add it to the volume */
319 
320 	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
321 		av->highest_lnum = be32_to_cpu(new_vh->lnum);
322 		av->last_data_size = be32_to_cpu(new_vh->data_size);
323 	}
324 
325 	if (av->vol_type == UBI_STATIC_VOLUME)
326 		av->used_ebs = be32_to_cpu(new_vh->used_ebs);
327 
328 	av->leb_count++;
329 
330 	rb_link_node(&new_aeb->u.rb, parent, p);
331 	rb_insert_color(&new_aeb->u.rb, &av->root);
332 
333 	return 0;
334 }
335 
336 /**
337  * process_pool_aeb - we found a non-empty PEB in a pool.
338  * @ubi: UBI device object
339  * @ai: attach info object
340  * @new_vh: the volume header derived from new_aeb
341  * @new_aeb: the AEB to be examined
342  *
343  * Returns 0 on success, < 0 indicates an internal error.
344  */
345 static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
346 			    struct ubi_vid_hdr *new_vh,
347 			    struct ubi_ainf_peb *new_aeb)
348 {
349 	int vol_id = be32_to_cpu(new_vh->vol_id);
350 	struct ubi_ainf_volume *av;
351 
352 	if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
353 		ubi_free_aeb(ai, new_aeb);
354 
355 		return 0;
356 	}
357 
358 	/* Find the volume this PEB belongs to */
359 	av = ubi_find_av(ai, vol_id);
360 	if (!av) {
361 		ubi_err(ubi, "orphaned volume in fastmap pool!");
362 		ubi_free_aeb(ai, new_aeb);
363 		return UBI_BAD_FASTMAP;
364 	}
365 
366 	ubi_assert(vol_id == av->vol_id);
367 
368 	return update_vol(ubi, ai, av, new_vh, new_aeb);
369 }
370 
371 /**
372  * unmap_peb - unmap a PEB.
373  * @ai: UBI attach info object
374  * @pnum: The PEB to be unmapped
375  *
376  * If fastmap detects a free PEB in the pool it has to check whether
377  * this PEB has been unmapped after writing the fastmap.
378  */
379 static void unmap_peb(struct ubi_attach_info *ai, int pnum)
380 {
381 	struct ubi_ainf_volume *av;
382 	struct rb_node *node, *node2;
383 	struct ubi_ainf_peb *aeb;
384 
385 	ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
386 		ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
387 			if (aeb->pnum == pnum) {
388 				rb_erase(&aeb->u.rb, &av->root);
389 				av->leb_count--;
390 				ubi_free_aeb(ai, aeb);
391 				return;
392 			}
393 		}
394 	}
395 }
396 
397 /**
398  * scan_pool - scans a pool for changed (no longer empty) PEBs.
399  * @ubi: UBI device object
400  * @ai: attach info object
401  * @pebs: an array of all PEB numbers in the pool to be scanned
402  * @pool_size: size of the pool (number of entries in @pebs)
403  * @max_sqnum: pointer to the maximal sequence number
404  * @free: list of PEBs which are most likely free (and go into @ai->free)
405  *
406  * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
407  * < 0 indicates an internal error.
408  */
409 static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
410 		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
411 		     struct list_head *free)
412 {
413 	struct ubi_vid_io_buf *vb;
414 	struct ubi_vid_hdr *vh;
415 	struct ubi_ec_hdr *ech;
416 	struct ubi_ainf_peb *new_aeb;
417 	int i, pnum, err, ret = 0;
418 
419 	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
420 	if (!ech)
421 		return -ENOMEM;
422 
423 	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
424 	if (!vb) {
425 		kfree(ech);
426 		return -ENOMEM;
427 	}
428 
429 	vh = ubi_get_vid_hdr(vb);
430 
431 	dbg_bld("scanning fastmap pool: size = %i", pool_size);
432 
433 	/*
434 	 * Now scan all PEBs in the pool to find changes which have been made
435 	 * after the creation of the fastmap
436 	 */
437 	for (i = 0; i < pool_size; i++) {
438 		int scrub = 0;
439 		int image_seq;
440 
441 		pnum = be32_to_cpu(pebs[i]);
442 
443 		if (ubi_io_is_bad(ubi, pnum)) {
444 			ubi_err(ubi, "bad PEB in fastmap pool!");
445 			ret = UBI_BAD_FASTMAP;
446 			goto out;
447 		}
448 
449 		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
450 		if (err && err != UBI_IO_BITFLIPS) {
451 			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
452 				pnum, err);
453 			ret = err > 0 ? UBI_BAD_FASTMAP : err;
454 			goto out;
455 		} else if (err == UBI_IO_BITFLIPS)
456 			scrub = 1;
457 
458 		/*
459 		 * Older UBI implementations have image_seq set to zero, so
460 		 * we shouldn't fail if image_seq == 0.
461 		 */
462 		image_seq = be32_to_cpu(ech->image_seq);
463 
464 		if (image_seq && (image_seq != ubi->image_seq)) {
465 			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
466 				be32_to_cpu(ech->image_seq), ubi->image_seq);
467 			ret = UBI_BAD_FASTMAP;
468 			goto out;
469 		}
470 
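		/*
		 * Three outcomes are possible here: an empty (0xFF) VID
		 * header means the PEB was unmapped after the fastmap was
		 * written and goes to @free; a readable header means new
		 * data was written to this pool PEB and has to be merged
		 * via process_pool_aeb(); everything else is treated as
		 * damage and forces a fall back to scanning mode.
		 */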
471 		err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
472 		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
473 			unsigned long long ec = be64_to_cpu(ech->ec);
474 			unmap_peb(ai, pnum);
475 			dbg_bld("Adding PEB to free: %i", pnum);
476 
477 			if (err == UBI_IO_FF_BITFLIPS)
478 				scrub = 1;
479 
480 			ret = add_aeb(ai, free, pnum, ec, scrub);
481 			if (ret)
				goto out;

			continue;
482 		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
483 			dbg_bld("Found non empty PEB:%i in pool", pnum);
484 
485 			if (err == UBI_IO_BITFLIPS)
486 				scrub = 1;
487 
488 			new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
489 			if (!new_aeb) {
490 				ret = -ENOMEM;
491 				goto out;
492 			}
493 
494 			new_aeb->lnum = be32_to_cpu(vh->lnum);
495 			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
496 			new_aeb->copy_flag = vh->copy_flag;
497 			new_aeb->scrub = scrub;
498 
499 			if (*max_sqnum < new_aeb->sqnum)
500 				*max_sqnum = new_aeb->sqnum;
501 
502 			err = process_pool_aeb(ubi, ai, vh, new_aeb);
503 			if (err) {
504 				ret = err > 0 ? UBI_BAD_FASTMAP : err;
505 				goto out;
506 			}
507 		} else {
508 			/* We are paranoid and fall back to scanning mode */
509 			ubi_err(ubi, "fastmap pool PEBs contains damaged PEBs!");
510 			ret = err > 0 ? UBI_BAD_FASTMAP : err;
511 			goto out;
512 		}
513 
514 	}
515 
516 out:
517 	ubi_free_vid_buf(vb);
518 	kfree(ech);
519 	return ret;
520 }
521 
522 /**
523  * count_fastmap_pebs - Counts the PEBs found by fastmap.
524  * @ai: The UBI attach info object
525  */
526 static int count_fastmap_pebs(struct ubi_attach_info *ai)
527 {
528 	struct ubi_ainf_peb *aeb;
529 	struct ubi_ainf_volume *av;
530 	struct rb_node *rb1, *rb2;
531 	int n = 0;
532 
533 	list_for_each_entry(aeb, &ai->erase, u.list)
534 		n++;
535 
536 	list_for_each_entry(aeb, &ai->free, u.list)
537 		n++;
538 
539 	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
540 		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
541 			n++;
542 
543 	return n;
544 }
545 
546 /**
547  * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
548  * @ubi: UBI device object
549  * @ai: UBI attach info object
550  * @fm: the fastmap to be attached
551  *
552  * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
553  * < 0 indicates an internal error.
554  */
555 static int ubi_attach_fastmap(struct ubi_device *ubi,
556 			      struct ubi_attach_info *ai,
557 			      struct ubi_fastmap_layout *fm)
558 {
559 	struct list_head used, free;
560 	struct ubi_ainf_volume *av;
561 	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
562 	struct ubi_fm_sb *fmsb;
563 	struct ubi_fm_hdr *fmhdr;
564 	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
565 	struct ubi_fm_ec *fmec;
566 	struct ubi_fm_volhdr *fmvhdr;
567 	struct ubi_fm_eba *fm_eba;
568 	int ret, i, j, pool_size, wl_pool_size;
569 	size_t fm_pos = 0, fm_size = ubi->fm_size;
570 	unsigned long long max_sqnum = 0;
571 	void *fm_raw = ubi->fm_buf;
572 
573 	INIT_LIST_HEAD(&used);
574 	INIT_LIST_HEAD(&free);
575 	ai->min_ec = UBI_MAX_ERASECOUNTER;
576 
577 	fmsb = (struct ubi_fm_sb *)(fm_raw);
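	/*
	 * Parse the fastmap image in the order it was written: super block,
	 * header, the two pools, the free/used/scrub/erase EC lists and
	 * finally the per-volume EBA tables. Every step is bounds-checked
	 * against @fm_size because the data comes straight from flash and
	 * may be corrupted.
	 */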
578 	ai->max_sqnum = fmsb->sqnum;
579 	fm_pos += sizeof(struct ubi_fm_sb);
580 	if (fm_pos >= fm_size)
581 		goto fail_bad;
582 
583 	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
584 	fm_pos += sizeof(*fmhdr);
585 	if (fm_pos >= fm_size)
586 		goto fail_bad;
587 
588 	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
589 		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
590 			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
591 		goto fail_bad;
592 	}
593 
594 	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
595 	fm_pos += sizeof(*fmpl);
596 	if (fm_pos >= fm_size)
597 		goto fail_bad;
598 	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
599 		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
600 			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
601 		goto fail_bad;
602 	}
603 
604 	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
605 	fm_pos += sizeof(*fmpl_wl);
606 	if (fm_pos >= fm_size)
607 		goto fail_bad;
608 	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
609 		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
610 			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
611 		goto fail_bad;
612 	}
613 
614 	pool_size = be16_to_cpu(fmpl->size);
615 	wl_pool_size = be16_to_cpu(fmpl_wl->size);
616 	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
617 	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);
618 
619 	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
620 		ubi_err(ubi, "bad pool size: %i", pool_size);
621 		goto fail_bad;
622 	}
623 
624 	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
625 		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
626 		goto fail_bad;
627 	}
628 
629 
630 	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
631 	    fm->max_pool_size < 0) {
632 		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
633 		goto fail_bad;
634 	}
635 
636 	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
637 	    fm->max_wl_pool_size < 0) {
638 		ubi_err(ubi, "bad maximal WL pool size: %i",
639 			fm->max_wl_pool_size);
640 		goto fail_bad;
641 	}
642 
643 	/* read EC values from free list */
644 	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
645 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
646 		fm_pos += sizeof(*fmec);
647 		if (fm_pos >= fm_size)
648 			goto fail_bad;
649 
650 		ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
651 			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
652 	}
653 
654 	/* read EC values from used list */
655 	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
656 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
657 		fm_pos += sizeof(*fmec);
658 		if (fm_pos >= fm_size)
659 			goto fail_bad;
660 
661 		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
662 			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
663 	}
664 
665 	/* read EC values from scrub list */
666 	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
667 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
668 		fm_pos += sizeof(*fmec);
669 		if (fm_pos >= fm_size)
670 			goto fail_bad;
671 
672 		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
673 			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
674 	}
675 
676 	/* read EC values from erase list */
677 	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
678 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
679 		fm_pos += sizeof(*fmec);
680 		if (fm_pos >= fm_size)
681 			goto fail_bad;
682 
683 		ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
684 			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
685 	}
686 
687 	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
688 	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
689 
690 	/* Iterate over all volumes and read their EBA table */
691 	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
692 		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
693 		fm_pos += sizeof(*fmvhdr);
694 		if (fm_pos >= fm_size)
695 			goto fail_bad;
696 
697 		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
698 			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
699 				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
700 			goto fail_bad;
701 		}
702 
703 		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
704 			     be32_to_cpu(fmvhdr->used_ebs),
705 			     be32_to_cpu(fmvhdr->data_pad),
706 			     fmvhdr->vol_type,
707 			     be32_to_cpu(fmvhdr->last_eb_bytes));
708 
709 		if (IS_ERR(av)) {
710 			if (PTR_ERR(av) == -EEXIST)
711 				ubi_err(ubi, "volume (ID %i) already exists",
712 					fmvhdr->vol_id);
713 
714 			goto fail_bad;
715 		}
716 
717 		ai->vols_found++;
718 		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
719 			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
720 
721 		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
722 		fm_pos += sizeof(*fm_eba);
723 		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
724 		if (fm_pos >= fm_size)
725 			goto fail_bad;
726 
727 		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
728 			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
729 				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
730 			goto fail_bad;
731 		}
732 
733 		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
734 			int pnum = be32_to_cpu(fm_eba->pnum[j]);
735 
736 			if (pnum < 0)
737 				continue;
738 
739 			aeb = NULL;
740 			list_for_each_entry(tmp_aeb, &used, u.list) {
741 				if (tmp_aeb->pnum == pnum) {
742 					aeb = tmp_aeb;
743 					break;
744 				}
745 			}
746 
747 			if (!aeb) {
748 				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
749 				goto fail_bad;
750 			}
751 
752 			aeb->lnum = j;
753 
754 			if (av->highest_lnum <= aeb->lnum)
755 				av->highest_lnum = aeb->lnum;
756 
757 			assign_aeb_to_av(ai, aeb, av);
758 
759 			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
760 				aeb->pnum, aeb->lnum, av->vol_id);
761 		}
762 	}
763 
764 	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
765 	if (ret)
766 		goto fail;
767 
768 	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
769 	if (ret)
770 		goto fail;
771 
772 	if (max_sqnum > ai->max_sqnum)
773 		ai->max_sqnum = max_sqnum;
774 
775 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
776 		list_move_tail(&tmp_aeb->u.list, &ai->free);
777 
778 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
779 		list_move_tail(&tmp_aeb->u.list, &ai->erase);
780 
781 	ubi_assert(list_empty(&free));
782 
783 	/*
784 	 * If fastmap is leaking PEBs (must not happen), raise a
785 	 * fat warning and fall back to scanning mode.
786 	 * We do this here because in ubi_wl_init() it's too late
787 	 * and we cannot fall back to scanning.
788 	 */
789 	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
790 		    ai->bad_peb_count - fm->used_blocks))
791 		goto fail_bad;
792 
793 	return 0;
794 
795 fail_bad:
796 	ret = UBI_BAD_FASTMAP;
797 fail:
798 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
799 		list_del(&tmp_aeb->u.list);
800 		ubi_free_aeb(ai, tmp_aeb);
801 	}
802 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
803 		list_del(&tmp_aeb->u.list);
804 		ubi_free_aeb(ai, tmp_aeb);
805 	}
806 
807 	return ret;
808 }
809 
810 /**
811  * find_fm_anchor - find the most recent Fastmap superblock (anchor)
812  * @ai: UBI attach info to be filled
813  */
814 static int find_fm_anchor(struct ubi_attach_info *ai)
815 {
816 	int ret = -1;
817 	struct ubi_ainf_peb *aeb;
818 	unsigned long long max_sqnum = 0;
819 
820 	list_for_each_entry(aeb, &ai->fastmap, u.list) {
821 		if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
822 			max_sqnum = aeb->sqnum;
823 			ret = aeb->pnum;
824 		}
825 	}
826 
827 	return ret;
828 }
829 
830 static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
831 				      struct ubi_ainf_peb *old)
832 {
833 	struct ubi_ainf_peb *new;
834 
835 	new = ubi_alloc_aeb(ai, old->pnum, old->ec);
836 	if (!new)
837 		return NULL;
838 
839 	new->vol_id = old->vol_id;
840 	new->sqnum = old->sqnum;
841 	new->lnum = old->lnum;
842 	new->scrub = old->scrub;
843 	new->copy_flag = old->copy_flag;
844 
845 	return new;
846 }
847 
848 /**
849  * ubi_scan_fastmap - scan the fastmap.
850  * @ubi: UBI device object
851  * @ai: UBI attach info to be filled
852  * @scan_ai: UBI attach info from the first 64 PEBs,
853  *           used to find the most recent Fastmap data structure
854  *
855  * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
856  * UBI_BAD_FASTMAP if one was found but is not usable.
857  * < 0 indicates an internal error.
858  */
859 int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
860 		     struct ubi_attach_info *scan_ai)
861 {
862 	struct ubi_fm_sb *fmsb, *fmsb2;
863 	struct ubi_vid_io_buf *vb;
864 	struct ubi_vid_hdr *vh;
865 	struct ubi_ec_hdr *ech;
866 	struct ubi_fastmap_layout *fm;
867 	struct ubi_ainf_peb *aeb;
868 	int i, used_blocks, pnum, fm_anchor, ret = 0;
869 	size_t fm_size;
870 	__be32 crc, tmp_crc;
871 	unsigned long long sqnum = 0;
872 
873 	fm_anchor = find_fm_anchor(scan_ai);
874 	if (fm_anchor < 0)
875 		return UBI_NO_FASTMAP;
876 
877 	/* Copy all (possible) fastmap blocks into our new attach structure. */
878 	list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
879 		struct ubi_ainf_peb *new;
880 
881 		new = clone_aeb(ai, aeb);
882 		if (!new)
883 			return -ENOMEM;
884 
885 		list_add(&new->u.list, &ai->fastmap);
886 	}
887 
888 	down_write(&ubi->fm_protect);
889 	memset(ubi->fm_buf, 0, ubi->fm_size);
890 
891 	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
892 	if (!fmsb) {
893 		ret = -ENOMEM;
894 		goto out;
895 	}
896 
897 	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
898 	if (!fm) {
899 		ret = -ENOMEM;
900 		kfree(fmsb);
901 		goto out;
902 	}
903 
904 	ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
905 	if (ret && ret != UBI_IO_BITFLIPS)
906 		goto free_fm_sb;
907 	else if (ret == UBI_IO_BITFLIPS)
908 		fm->to_be_tortured[0] = 1;
909 
910 	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
911 		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
912 			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
913 		ret = UBI_BAD_FASTMAP;
914 		goto free_fm_sb;
915 	}
916 
917 	if (fmsb->version != UBI_FM_FMT_VERSION) {
918 		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
919 			fmsb->version, UBI_FM_FMT_VERSION);
920 		ret = UBI_BAD_FASTMAP;
921 		goto free_fm_sb;
922 	}
923 
924 	used_blocks = be32_to_cpu(fmsb->used_blocks);
925 	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
926 		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
927 			used_blocks);
928 		ret = UBI_BAD_FASTMAP;
929 		goto free_fm_sb;
930 	}
931 
932 	fm_size = ubi->leb_size * used_blocks;
933 	if (fm_size != ubi->fm_size) {
934 		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
935 			fm_size, ubi->fm_size);
936 		ret = UBI_BAD_FASTMAP;
937 		goto free_fm_sb;
938 	}
939 
940 	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
941 	if (!ech) {
942 		ret = -ENOMEM;
943 		goto free_fm_sb;
944 	}
945 
946 	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
947 	if (!vb) {
948 		ret = -ENOMEM;
949 		goto free_hdr;
950 	}
951 
952 	vh = ubi_get_vid_hdr(vb);
953 
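	/*
	 * Read all fastmap blocks. Block 0 is the anchor and must carry
	 * UBI_FM_SB_VOLUME_ID, all further blocks UBI_FM_DATA_VOLUME_ID.
	 * Each block has to pass the EC header, image_seq and VID header
	 * sanity checks before its payload is copied into ubi->fm_buf.
	 */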
954 	for (i = 0; i < used_blocks; i++) {
955 		int image_seq;
956 
957 		pnum = be32_to_cpu(fmsb->block_loc[i]);
958 
959 		if (ubi_io_is_bad(ubi, pnum)) {
960 			ret = UBI_BAD_FASTMAP;
961 			goto free_hdr;
962 		}
963 
964 		if (i == 0 && pnum != fm_anchor) {
965 			ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
966 				pnum, fm_anchor);
967 			ret = UBI_BAD_FASTMAP;
968 			goto free_hdr;
969 		}
970 
971 		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
972 		if (ret && ret != UBI_IO_BITFLIPS) {
973 			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
974 				i, pnum);
975 			if (ret > 0)
976 				ret = UBI_BAD_FASTMAP;
977 			goto free_hdr;
978 		} else if (ret == UBI_IO_BITFLIPS)
979 			fm->to_be_tortured[i] = 1;
980 
981 		image_seq = be32_to_cpu(ech->image_seq);
982 		if (!ubi->image_seq)
983 			ubi->image_seq = image_seq;
984 
985 		/*
986 		 * Older UBI implementations have image_seq set to zero, so
987 		 * we shouldn't fail if image_seq == 0.
988 		 */
989 		if (image_seq && (image_seq != ubi->image_seq)) {
990 			ubi_err(ubi, "wrong image seq:%d instead of %d",
991 				be32_to_cpu(ech->image_seq), ubi->image_seq);
992 			ret = UBI_BAD_FASTMAP;
993 			goto free_hdr;
994 		}
995 
996 		ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
997 		if (ret && ret != UBI_IO_BITFLIPS) {
998 			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
999 				i, pnum);
1000 			goto free_hdr;
1001 		}
1002 
1003 		if (i == 0) {
1004 			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
1005 				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
1006 					be32_to_cpu(vh->vol_id),
1007 					UBI_FM_SB_VOLUME_ID);
1008 				ret = UBI_BAD_FASTMAP;
1009 				goto free_hdr;
1010 			}
1011 		} else {
1012 			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
1013 				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
1014 					be32_to_cpu(vh->vol_id),
1015 					UBI_FM_DATA_VOLUME_ID);
1016 				ret = UBI_BAD_FASTMAP;
1017 				goto free_hdr;
1018 			}
1019 		}
1020 
1021 		if (sqnum < be64_to_cpu(vh->sqnum))
1022 			sqnum = be64_to_cpu(vh->sqnum);
1023 
1024 		ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
1025 				       pnum, 0, ubi->leb_size);
1026 		if (ret && ret != UBI_IO_BITFLIPS) {
1027 			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
1028 				"err: %i)", i, pnum, ret);
1029 			goto free_hdr;
1030 		}
1031 	}
1032 
1033 	kfree(fmsb);
1034 	fmsb = NULL;
1035 
1036 	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
1037 	tmp_crc = be32_to_cpu(fmsb2->data_crc);
1038 	fmsb2->data_crc = 0;
1039 	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
1040 	if (crc != tmp_crc) {
1041 		ubi_err(ubi, "fastmap data CRC is invalid");
1042 		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
1043 			tmp_crc, crc);
1044 		ret = UBI_BAD_FASTMAP;
1045 		goto free_hdr;
1046 	}
1047 
1048 	fmsb2->sqnum = sqnum;
1049 
1050 	fm->used_blocks = used_blocks;
1051 
1052 	ret = ubi_attach_fastmap(ubi, ai, fm);
1053 	if (ret) {
1054 		if (ret > 0)
1055 			ret = UBI_BAD_FASTMAP;
1056 		goto free_hdr;
1057 	}
1058 
1059 	for (i = 0; i < used_blocks; i++) {
1060 		struct ubi_wl_entry *e;
1061 
1062 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1063 		if (!e) {
1064 			while (i--)
1065 				kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
1066 
1067 			ret = -ENOMEM;
1068 			goto free_hdr;
1069 		}
1070 
1071 		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1072 		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1073 		fm->e[i] = e;
1074 	}
1075 
1076 	ubi->fm = fm;
1077 	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1078 	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1079 	ubi_msg(ubi, "attached by fastmap");
1080 	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
1081 	ubi_msg(ubi, "fastmap WL pool size: %d",
1082 		ubi->fm_wl_pool.max_size);
1083 	ubi->fm_disabled = 0;
1084 	ubi->fast_attach = 1;
1085 
1086 	ubi_free_vid_buf(vb);
1087 	kfree(ech);
1088 out:
1089 	up_write(&ubi->fm_protect);
1090 	if (ret == UBI_BAD_FASTMAP)
1091 		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
1092 	return ret;
1093 
1094 free_hdr:
1095 	ubi_free_vid_buf(vb);
1096 	kfree(ech);
1097 free_fm_sb:
1098 	kfree(fmsb);
1099 	kfree(fm);
1100 	goto out;
1101 }
1102 
1103 /**
1104  * ubi_write_fastmap - writes a fastmap.
1105  * @ubi: UBI device object
1106  * @new_fm: the to be written fastmap
1107  *
1108  * Returns 0 on success, < 0 indicates an internal error.
1109  */
1110 static int ubi_write_fastmap(struct ubi_device *ubi,
1111 			     struct ubi_fastmap_layout *new_fm)
1112 {
1113 	size_t fm_pos = 0;
1114 	void *fm_raw;
1115 	struct ubi_fm_sb *fmsb;
1116 	struct ubi_fm_hdr *fmh;
1117 	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
1118 	struct ubi_fm_ec *fec;
1119 	struct ubi_fm_volhdr *fvh;
1120 	struct ubi_fm_eba *feba;
1121 	struct ubi_wl_entry *wl_e;
1122 	struct ubi_volume *vol;
1123 	struct ubi_vid_io_buf *avbuf, *dvbuf;
1124 	struct ubi_vid_hdr *avhdr, *dvhdr;
1125 	struct ubi_work *ubi_wrk;
1126 	struct rb_node *tmp_rb;
1127 	int ret, i, j, free_peb_count, used_peb_count, vol_count;
1128 	int scrub_peb_count, erase_peb_count;
1129 	unsigned long *seen_pebs = NULL;
1130 
1131 	fm_raw = ubi->fm_buf;
1132 	memset(ubi->fm_buf, 0, ubi->fm_size);
1133 
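	/*
	 * Assemble the new fastmap image in ubi->fm_buf using the same
	 * layout that ubi_attach_fastmap() parses. The in-memory state
	 * (pools, free/used/scrub PEBs, pending erase work and the volume
	 * EBA tables) is snapshotted below while holding volumes_lock and
	 * wl_lock.
	 */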
1134 	avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1135 	if (!avbuf) {
1136 		ret = -ENOMEM;
1137 		goto out;
1138 	}
1139 
1140 	dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
1141 	if (!dvbuf) {
1142 		ret = -ENOMEM;
1143 		goto out_kfree;
1144 	}
1145 
1146 	avhdr = ubi_get_vid_hdr(avbuf);
1147 	dvhdr = ubi_get_vid_hdr(dvbuf);
1148 
1149 	seen_pebs = init_seen(ubi);
1150 	if (IS_ERR(seen_pebs)) {
1151 		ret = PTR_ERR(seen_pebs);
1152 		goto out_kfree;
1153 	}
1154 
1155 	spin_lock(&ubi->volumes_lock);
1156 	spin_lock(&ubi->wl_lock);
1157 
1158 	fmsb = (struct ubi_fm_sb *)fm_raw;
1159 	fm_pos += sizeof(*fmsb);
1160 	ubi_assert(fm_pos <= ubi->fm_size);
1161 
1162 	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1163 	fm_pos += sizeof(*fmh);
1164 	ubi_assert(fm_pos <= ubi->fm_size);
1165 
1166 	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1167 	fmsb->version = UBI_FM_FMT_VERSION;
1168 	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1169 	/* the max sqnum will be filled in while *reading* the fastmap */
1170 	fmsb->sqnum = 0;
1171 
1172 	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1173 	free_peb_count = 0;
1174 	used_peb_count = 0;
1175 	scrub_peb_count = 0;
1176 	erase_peb_count = 0;
1177 	vol_count = 0;
1178 
1179 	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1180 	fm_pos += sizeof(*fmpl);
1181 	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1182 	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
1183 	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1184 
1185 	for (i = 0; i < ubi->fm_pool.size; i++) {
1186 		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1187 		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
1188 	}
1189 
1190 	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1191 	fm_pos += sizeof(*fmpl_wl);
1192 	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1193 	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
1194 	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1195 
1196 	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
1197 		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1198 		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
1199 	}
1200 
1201 	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
1202 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1203 
1204 		fec->pnum = cpu_to_be32(wl_e->pnum);
1205 		set_seen(ubi, wl_e->pnum, seen_pebs);
1206 		fec->ec = cpu_to_be32(wl_e->ec);
1207 
1208 		free_peb_count++;
1209 		fm_pos += sizeof(*fec);
1210 		ubi_assert(fm_pos <= ubi->fm_size);
1211 	}
1212 	fmh->free_peb_count = cpu_to_be32(free_peb_count);
1213 
1214 	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
1215 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1216 
1217 		fec->pnum = cpu_to_be32(wl_e->pnum);
1218 		set_seen(ubi, wl_e->pnum, seen_pebs);
1219 		fec->ec = cpu_to_be32(wl_e->ec);
1220 
1221 		used_peb_count++;
1222 		fm_pos += sizeof(*fec);
1223 		ubi_assert(fm_pos <= ubi->fm_size);
1224 	}
1225 
1226 	ubi_for_each_protected_peb(ubi, i, wl_e) {
1227 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1228 
1229 		fec->pnum = cpu_to_be32(wl_e->pnum);
1230 		set_seen(ubi, wl_e->pnum, seen_pebs);
1231 		fec->ec = cpu_to_be32(wl_e->ec);
1232 
1233 		used_peb_count++;
1234 		fm_pos += sizeof(*fec);
1235 		ubi_assert(fm_pos <= ubi->fm_size);
1236 	}
1237 	fmh->used_peb_count = cpu_to_be32(used_peb_count);
1238 
1239 	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
1240 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1241 
1242 		fec->pnum = cpu_to_be32(wl_e->pnum);
1243 		set_seen(ubi, wl_e->pnum, seen_pebs);
1244 		fec->ec = cpu_to_be32(wl_e->ec);
1245 
1246 		scrub_peb_count++;
1247 		fm_pos += sizeof(*fec);
1248 		ubi_assert(fm_pos <= ubi->fm_size);
1249 	}
1250 	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1251 
1252 
1253 	list_for_each_entry(ubi_wrk, &ubi->works, list) {
1254 		if (ubi_is_erase_work(ubi_wrk)) {
1255 			wl_e = ubi_wrk->e;
1256 			ubi_assert(wl_e);
1257 
1258 			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1259 
1260 			fec->pnum = cpu_to_be32(wl_e->pnum);
1261 			set_seen(ubi, wl_e->pnum, seen_pebs);
1262 			fec->ec = cpu_to_be32(wl_e->ec);
1263 
1264 			erase_peb_count++;
1265 			fm_pos += sizeof(*fec);
1266 			ubi_assert(fm_pos <= ubi->fm_size);
1267 		}
1268 	}
1269 	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1270 
1271 	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1272 		vol = ubi->volumes[i];
1273 
1274 		if (!vol)
1275 			continue;
1276 
1277 		vol_count++;
1278 
1279 		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1280 		fm_pos += sizeof(*fvh);
1281 		ubi_assert(fm_pos <= ubi->fm_size);
1282 
1283 		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1284 		fvh->vol_id = cpu_to_be32(vol->vol_id);
1285 		fvh->vol_type = vol->vol_type;
1286 		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1287 		fvh->data_pad = cpu_to_be32(vol->data_pad);
1288 		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1289 
1290 		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1291 			vol->vol_type == UBI_STATIC_VOLUME);
1292 
1293 		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1294 		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1295 		ubi_assert(fm_pos <= ubi->fm_size);
1296 
1297 		for (j = 0; j < vol->reserved_pebs; j++) {
1298 			struct ubi_eba_leb_desc ldesc;
1299 
1300 			ubi_eba_get_ldesc(vol, j, &ldesc);
1301 			feba->pnum[j] = cpu_to_be32(ldesc.pnum);
1302 		}
1303 
1304 		feba->reserved_pebs = cpu_to_be32(j);
1305 		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1306 	}
1307 	fmh->vol_count = cpu_to_be32(vol_count);
1308 	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1309 
1310 	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1311 	avhdr->lnum = 0;
1312 
1313 	spin_unlock(&ubi->wl_lock);
1314 	spin_unlock(&ubi->volumes_lock);
1315 
1316 	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1317 	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
1318 	if (ret) {
1319 		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
1320 		goto out_kfree;
1321 	}
1322 
1323 	for (i = 0; i < new_fm->used_blocks; i++) {
1324 		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1325 		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
1326 		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1327 	}
1328 
1329 	fmsb->data_crc = 0;
1330 	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1331 					   ubi->fm_size));
1332 
1333 	for (i = 1; i < new_fm->used_blocks; i++) {
1334 		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1335 		dvhdr->lnum = cpu_to_be32(i);
1336 		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1337 			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1338 		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
1339 		if (ret) {
1340 			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
1341 				new_fm->e[i]->pnum);
1342 			goto out_kfree;
1343 		}
1344 	}
1345 
1346 	for (i = 0; i < new_fm->used_blocks; i++) {
1347 		ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
1348 					new_fm->e[i]->pnum, 0, ubi->leb_size);
1349 		if (ret) {
1350 			ubi_err(ubi, "unable to write fastmap to PEB %i!",
1351 				new_fm->e[i]->pnum);
1352 			goto out_kfree;
1353 		}
1354 	}
1355 
1356 	ubi_assert(new_fm);
1357 	ubi->fm = new_fm;
1358 
1359 	ret = self_check_seen(ubi, seen_pebs);
1360 	dbg_bld("fastmap written!");
1361 
1362 out_kfree:
1363 	ubi_free_vid_buf(avbuf);
1364 	ubi_free_vid_buf(dvbuf);
1365 	free_seen(seen_pebs);
1366 out:
1367 	return ret;
1368 }
1369 
1370 /**
1371  * erase_block - Manually erase a PEB.
1372  * @ubi: UBI device object
1373  * @pnum: PEB to be erased
1374  *
1375  * Returns the new EC value on success, < 0 indicates an internal error.
1376  */
1377 static int erase_block(struct ubi_device *ubi, int pnum)
1378 {
1379 	int ret;
1380 	struct ubi_ec_hdr *ec_hdr;
1381 	long long ec;
1382 
1383 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1384 	if (!ec_hdr)
1385 		return -ENOMEM;
1386 
1387 	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1388 	if (ret < 0)
1389 		goto out;
1390 	else if (ret && ret != UBI_IO_BITFLIPS) {
1391 		ret = -EINVAL;
1392 		goto out;
1393 	}
1394 
1395 	ret = ubi_io_sync_erase(ubi, pnum, 0);
1396 	if (ret < 0)
1397 		goto out;
1398 
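	/*
	 * ubi_io_sync_erase() returns the number of erasures performed;
	 * add it to the previous erase counter and write the updated EC
	 * header back to the freshly erased PEB.
	 */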
1399 	ec = be64_to_cpu(ec_hdr->ec);
1400 	ec += ret;
1401 	if (ec > UBI_MAX_ERASECOUNTER) {
1402 		ret = -EINVAL;
1403 		goto out;
1404 	}
1405 
1406 	ec_hdr->ec = cpu_to_be64(ec);
1407 	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1408 	if (ret < 0)
1409 		goto out;
1410 
1411 	ret = ec;
1412 out:
1413 	kfree(ec_hdr);
1414 	return ret;
1415 }
1416 
1417 /**
1418  * invalidate_fastmap - destroys a fastmap.
1419  * @ubi: UBI device object
1420  *
1421  * This function ensures that upon next UBI attach a full scan
1422  * is issued. We need this if UBI is about to write a new fastmap
1423  * but is unable to do so. In this case we have two options:
1424  * a) Make sure that the current fastmap will not be used upon
1425  * attach time and continue or b) fall back to RO mode to have the
1426  * current fastmap in a valid state.
1427  * Returns 0 on success, < 0 indicates an internal error.
1428  */
1429 static int invalidate_fastmap(struct ubi_device *ubi)
1430 {
1431 	int ret;
1432 	struct ubi_fastmap_layout *fm;
1433 	struct ubi_wl_entry *e;
1434 	struct ubi_vid_io_buf *vb = NULL;
1435 	struct ubi_vid_hdr *vh;
1436 
1437 	if (!ubi->fm)
1438 		return 0;
1439 
1440 	ubi->fm = NULL;
1441 
1442 	ret = -ENOMEM;
1443 	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
1444 	if (!fm)
1445 		goto out;
1446 
1447 	vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
1448 	if (!vb)
1449 		goto out_free_fm;
1450 
1451 	vh = ubi_get_vid_hdr(vb);
1452 
1453 	ret = -ENOSPC;
1454 	e = ubi_wl_get_fm_peb(ubi, 1);
1455 	if (!e)
1456 		goto out_free_fm;
1457 
1458 	/*
1459 	 * Create fake fastmap such that UBI will fall back
1460 	 * to scanning mode.
1461 	 */
1462 	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1463 	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
1464 	if (ret < 0) {
1465 		ubi_wl_put_fm_peb(ubi, e, 0, 0);
1466 		goto out_free_fm;
1467 	}
1468 
1469 	fm->used_blocks = 1;
1470 	fm->e[0] = e;
1471 
1472 	ubi->fm = fm;
1473 
1474 out:
1475 	ubi_free_vid_buf(vb);
1476 	return ret;
1477 
1478 out_free_fm:
1479 	kfree(fm);
1480 	goto out;
1481 }
1482 
1483 /**
1484  * return_fm_pebs - returns all PEBs used by a fastmap back to the
1485  * WL sub-system.
1486  * @ubi: UBI device object
1487  * @fm: fastmap layout object
1488  */
1489 static void return_fm_pebs(struct ubi_device *ubi,
1490 			   struct ubi_fastmap_layout *fm)
1491 {
1492 	int i;
1493 
1494 	if (!fm)
1495 		return;
1496 
1497 	for (i = 0; i < fm->used_blocks; i++) {
1498 		if (fm->e[i]) {
1499 			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
1500 					  fm->to_be_tortured[i]);
1501 			fm->e[i] = NULL;
1502 		}
1503 	}
1504 }
1505 
1506 /**
1507  * ubi_update_fastmap - will be called by UBI if a volume changes or
1508  * a fastmap pool becomes full.
1509  * @ubi: UBI device object
1510  *
1511  * Returns 0 on success, < 0 indicates an internal error.
1512  */
1513 int ubi_update_fastmap(struct ubi_device *ubi)
1514 {
1515 	int ret, i, j;
1516 	struct ubi_fastmap_layout *new_fm, *old_fm;
1517 	struct ubi_wl_entry *tmp_e;
1518 
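	/*
	 * Serialize against fastmap readers, running work and EBA changes.
	 * The three locks are taken in this order and released in reverse
	 * order on every exit path.
	 */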
1519 	down_write(&ubi->fm_protect);
1520 	down_write(&ubi->work_sem);
1521 	down_write(&ubi->fm_eba_sem);
1522 
1523 	ubi_refill_pools(ubi);
1524 
1525 	if (ubi->ro_mode || ubi->fm_disabled) {
1526 		up_write(&ubi->fm_eba_sem);
1527 		up_write(&ubi->work_sem);
1528 		up_write(&ubi->fm_protect);
1529 		return 0;
1530 	}
1531 
1532 	ret = ubi_ensure_anchor_pebs(ubi);
1533 	if (ret) {
1534 		up_write(&ubi->fm_eba_sem);
1535 		up_write(&ubi->work_sem);
1536 		up_write(&ubi->fm_protect);
1537 		return ret;
1538 	}
1539 
1540 	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1541 	if (!new_fm) {
1542 		up_write(&ubi->fm_eba_sem);
1543 		up_write(&ubi->work_sem);
1544 		up_write(&ubi->fm_protect);
1545 		return -ENOMEM;
1546 	}
1547 
1548 	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1549 	old_fm = ubi->fm;
1550 	ubi->fm = NULL;
1551 
1552 	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1553 		ubi_err(ubi, "fastmap too large");
1554 		ret = -ENOSPC;
1555 		goto err;
1556 	}
1557 
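	/*
	 * Collect PEBs for all non-anchor fastmap blocks: prefer a fresh
	 * PEB from the WL sub-system and fall back to erasing and reusing
	 * the corresponding block of the old fastmap.
	 */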
1558 	for (i = 1; i < new_fm->used_blocks; i++) {
1559 		spin_lock(&ubi->wl_lock);
1560 		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1561 		spin_unlock(&ubi->wl_lock);
1562 
1563 		if (!tmp_e) {
1564 			if (old_fm && old_fm->e[i]) {
1565 				ret = erase_block(ubi, old_fm->e[i]->pnum);
1566 				if (ret < 0) {
1567 					ubi_err(ubi, "could not erase old fastmap PEB");
1568 
1569 					for (j = 1; j < i; j++) {
1570 						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1571 								  j, 0);
1572 						new_fm->e[j] = NULL;
1573 					}
1574 					goto err;
1575 				}
1576 				new_fm->e[i] = old_fm->e[i];
1577 				old_fm->e[i] = NULL;
1578 			} else {
1579 				ubi_err(ubi, "could not get any free erase block");
1580 
1581 				for (j = 1; j < i; j++) {
1582 					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1583 					new_fm->e[j] = NULL;
1584 				}
1585 
1586 				ret = -ENOSPC;
1587 				goto err;
1588 			}
1589 		} else {
1590 			new_fm->e[i] = tmp_e;
1591 
1592 			if (old_fm && old_fm->e[i]) {
1593 				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1594 						  old_fm->to_be_tortured[i]);
1595 				old_fm->e[i] = NULL;
1596 			}
1597 		}
1598 	}
1599 
1600 	/* Old fastmap is larger than the new one */
1601 	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
1602 		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
1603 			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1604 					  old_fm->to_be_tortured[i]);
1605 			old_fm->e[i] = NULL;
1606 		}
1607 	}
1608 
1609 	spin_lock(&ubi->wl_lock);
1610 	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
1611 	spin_unlock(&ubi->wl_lock);
1612 
1613 	if (old_fm) {
1614 		/* no fresh anchor PEB was found, reuse the old one */
1615 		if (!tmp_e) {
1616 			ret = erase_block(ubi, old_fm->e[0]->pnum);
1617 			if (ret < 0) {
1618 				ubi_err(ubi, "could not erase old anchor PEB");
1619 
1620 				for (i = 1; i < new_fm->used_blocks; i++) {
1621 					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1622 							  i, 0);
1623 					new_fm->e[i] = NULL;
1624 				}
1625 				goto err;
1626 			}
1627 			new_fm->e[0] = old_fm->e[0];
1628 			new_fm->e[0]->ec = ret;
1629 			old_fm->e[0] = NULL;
1630 		} else {
1631 			/* we've got a new anchor PEB, return the old one */
1632 			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1633 					  old_fm->to_be_tortured[0]);
1634 			new_fm->e[0] = tmp_e;
1635 			old_fm->e[0] = NULL;
1636 		}
1637 	} else {
1638 		if (!tmp_e) {
1639 			ubi_err(ubi, "could not find any anchor PEB");
1640 
1641 			for (i = 1; i < new_fm->used_blocks; i++) {
1642 				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1643 				new_fm->e[i] = NULL;
1644 			}
1645 
1646 			ret = -ENOSPC;
1647 			goto err;
1648 		}
1649 		new_fm->e[0] = tmp_e;
1650 	}
1651 
1652 	ret = ubi_write_fastmap(ubi, new_fm);
1653 
1654 	if (ret)
1655 		goto err;
1656 
1657 out_unlock:
1658 	up_write(&ubi->fm_eba_sem);
1659 	up_write(&ubi->work_sem);
1660 	up_write(&ubi->fm_protect);
1661 	kfree(old_fm);
1662 	return ret;
1663 
1664 err:
1665 	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
1666 
1667 	ret = invalidate_fastmap(ubi);
1668 	if (ret < 0) {
1669 		ubi_err(ubi, "Unable to invalidate current fastmap!");
1670 		ubi_ro_mode(ubi);
1671 	} else {
1672 		return_fm_pebs(ubi, old_fm);
1673 		return_fm_pebs(ubi, new_fm);
1674 		ret = 0;
1675 	}
1676 
1677 	kfree(new_fm);
1678 	goto out_unlock;
1679 }
1680