xref: /openbmc/u-boot/drivers/mtd/ubi/fastmap.c (revision 47539e23)
1 /*
2  * Copyright (c) 2012 Linutronix GmbH
3  * Author: Richard Weinberger <richard@nod.at>
4  *
5  * SPDX-License-Identifier:	GPL-2.0+
6  *
7  */
8 
9 #define __UBOOT__
10 #ifndef __UBOOT__
11 #include <linux/crc32.h>
12 #else
13 #include <div64.h>
14 #include <malloc.h>
15 #include <ubi_uboot.h>
16 #endif
17 
18 #include <linux/compat.h>
19 #include <linux/math64.h>
20 #include "ubi.h"
21 
22 /**
23  * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
24  * @ubi: UBI device description object
25  */
26 size_t ubi_calc_fm_size(struct ubi_device *ubi)
27 {
28 	size_t size;
29 
30 	size = sizeof(struct ubi_fm_sb) + sizeof(struct ubi_fm_hdr) + \
31 		sizeof(struct ubi_fm_scan_pool) + \
32 		sizeof(struct ubi_fm_scan_pool) + \
33 		(ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
34 		(sizeof(struct ubi_fm_eba) + \
35 		(ubi->peb_count * sizeof(__be32))) + \
36 		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
37 	return roundup(size, ubi->leb_size);
38 }
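
/*
 * Illustrative sketch (not part of the driver): the fastmap must hold the
 * superblock, the header, both pools, one EC entry per PEB, the EBA table
 * (one __be32 per PEB) and up to UBI_MAX_VOLUMES volume headers, rounded
 * up to whole LEBs. Assuming a hypothetical attached device 'ubi':
 *
 *	size_t fm_bytes = ubi_calc_fm_size(ubi);
 *	int fm_lebs = fm_bytes / ubi->leb_size;	// becomes fm->used_blocks
 */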
39 
40 
41 /**
42  * new_fm_vhdr - allocate a new volume header for fastmap usage.
43  * @ubi: UBI device description object
44  * @vol_id: the VID of the new header
45  *
46  * Returns a new struct ubi_vid_hdr on success.
47  * NULL indicates out of memory.
48  */
49 static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
50 {
51 	struct ubi_vid_hdr *new;
52 
53 	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
54 	if (!new)
55 		goto out;
56 
57 	new->vol_type = UBI_VID_DYNAMIC;
58 	new->vol_id = cpu_to_be32(vol_id);
59 
60 	/* UBI implementations without fastmap support have to delete the
61 	 * fastmap.
62 	 */
63 	new->compat = UBI_COMPAT_DELETE;
64 
65 out:
66 	return new;
67 }
68 
69 /**
70  * add_aeb - create and add an attach erase block to a given list.
71  * @ai: UBI attach info object
72  * @list: the target list
73  * @pnum: PEB number of the new attach erase block
74  * @ec: erase counter of the new LEB
75  * @scrub: scrub this PEB after attaching
76  *
77  * Returns 0 on success, < 0 indicates an internal error.
78  */
79 static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
80 		   int pnum, int ec, int scrub)
81 {
82 	struct ubi_ainf_peb *aeb;
83 
84 	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
85 	if (!aeb)
86 		return -ENOMEM;
87 
88 	aeb->pnum = pnum;
89 	aeb->ec = ec;
90 	aeb->lnum = -1;
91 	aeb->scrub = scrub;
92 	aeb->copy_flag = aeb->sqnum = 0;
93 
94 	ai->ec_sum += aeb->ec;
95 	ai->ec_count++;
96 
97 	if (ai->max_ec < aeb->ec)
98 		ai->max_ec = aeb->ec;
99 
100 	if (ai->min_ec > aeb->ec)
101 		ai->min_ec = aeb->ec;
102 
103 	list_add_tail(&aeb->u.list, list);
104 
105 	return 0;
106 }
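
/*
 * Illustrative sketch (assumes 'ai' came from ubi_attach_fastmap(), which
 * presets ai->min_ec to UBI_MAX_ERASECOUNTER): each add_aeb() call updates
 * the erase counter statistics consumed later by wear-leveling:
 *
 *	add_aeb(ai, &ai->free, 10, 100, 0);
 *	add_aeb(ai, &ai->free, 11, 300, 0);
 *	// now ai->min_ec == 100, ai->max_ec == 300, ai->ec_sum == 400
 */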
107 
108 /**
109  * add_vol - create and add a new volume to ubi_attach_info.
110  * @ai: ubi_attach_info object
111  * @vol_id: VID of the new volume
112  * @used_ebs: number of used EBs
113  * @data_pad: data padding value of the new volume
114  * @vol_type: volume type
115  * @last_eb_bytes: number of bytes in the last LEB
116  *
117  * Returns the new struct ubi_ainf_volume on success.
118  * NULL indicates an error.
119  */
120 static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
121 				       int used_ebs, int data_pad, u8 vol_type,
122 				       int last_eb_bytes)
123 {
124 	struct ubi_ainf_volume *av;
125 	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
126 
127 	while (*p) {
128 		parent = *p;
129 		av = rb_entry(parent, struct ubi_ainf_volume, rb);
130 
131 		if (vol_id > av->vol_id)
132 			p = &(*p)->rb_left;
133 		else
134 			p = &(*p)->rb_right;
135 	}
136 
137 	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
138 	if (!av)
139 		goto out;
140 
141 	av->highest_lnum = av->leb_count = 0;
142 	av->vol_id = vol_id;
143 	av->used_ebs = used_ebs;
144 	av->data_pad = data_pad;
145 	av->last_data_size = last_eb_bytes;
146 	av->compat = 0;
147 	av->vol_type = vol_type;
148 	av->root = RB_ROOT;
149 
150 	dbg_bld("found volume (ID %i)", vol_id);
151 
152 	rb_link_node(&av->rb, parent, p);
153 	rb_insert_color(&av->rb, &ai->volumes);
154 
155 out:
156 	return av;
157 }
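
/*
 * Note: the tree above deliberately keeps larger vol_ids to the left; a
 * lookup has to descend with the mirrored comparison, as the walk in
 * process_pool_aeb() does (illustrative):
 *
 *	if (vol_id > av->vol_id)
 *		p = &(*p)->rb_left;
 *	else if (vol_id < av->vol_id)
 *		p = &(*p)->rb_right;
 *	else
 *		break;	// found
 */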
158 
159 /**
160  * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
161  * from its original list.
162  * @ai: ubi_attach_info object
163  * @aeb: the SEB to be assigned
164  * @av: target scan volume
165  */
166 static void assign_aeb_to_av(struct ubi_attach_info *ai,
167 			     struct ubi_ainf_peb *aeb,
168 			     struct ubi_ainf_volume *av)
169 {
170 	struct ubi_ainf_peb *tmp_aeb;
171 	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
172 
173 	p = &av->root.rb_node;
174 	while (*p) {
175 		parent = *p;
176 
177 		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
178 		if (aeb->lnum != tmp_aeb->lnum) {
179 			if (aeb->lnum < tmp_aeb->lnum)
180 				p = &(*p)->rb_left;
181 			else
182 				p = &(*p)->rb_right;
183 
184 			continue;
185 		} else
186 			break;
187 	}
188 
189 	list_del(&aeb->u.list);
190 	av->leb_count++;
191 
192 	rb_link_node(&aeb->u.rb, parent, p);
193 	rb_insert_color(&aeb->u.rb, &av->root);
194 }
195 
196 /**
197  * update_vol - inserts or updates a LEB which was found in a pool.
198  * @ubi: the UBI device object
199  * @ai: attach info object
200  * @av: the volume this LEB belongs to
201  * @new_vh: the volume header derived from new_aeb
202  * @new_aeb: the AEB to be examined
203  *
204  * Returns 0 on success, < 0 indicates an internal error.
205  */
206 static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
207 		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
208 		      struct ubi_ainf_peb *new_aeb)
209 {
210 	struct rb_node **p = &av->root.rb_node, *parent = NULL;
211 	struct ubi_ainf_peb *aeb, *victim;
212 	int cmp_res;
213 
214 	while (*p) {
215 		parent = *p;
216 		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
217 
218 		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
219 			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
220 				p = &(*p)->rb_left;
221 			else
222 				p = &(*p)->rb_right;
223 
224 			continue;
225 		}
226 
227 		/* This case can happen if the fastmap gets written
228 		 * because of a volume change (creation, deletion, ..).
229 		 * Then a PEB can be within the persistent EBA and the pool.
230 		 */
231 		if (aeb->pnum == new_aeb->pnum) {
232 			ubi_assert(aeb->lnum == new_aeb->lnum);
233 			kmem_cache_free(ai->aeb_slab_cache, new_aeb);
234 
235 			return 0;
236 		}
237 
238 		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
239 		if (cmp_res < 0)
240 			return cmp_res;
241 
242 		/* new_aeb is newer */
243 		if (cmp_res & 1) {
244 			victim = kmem_cache_alloc(ai->aeb_slab_cache,
245 				GFP_KERNEL);
246 			if (!victim)
247 				return -ENOMEM;
248 
249 			victim->ec = aeb->ec;
250 			victim->pnum = aeb->pnum;
251 			list_add_tail(&victim->u.list, &ai->erase);
252 
253 			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
254 				av->last_data_size = \
255 					be32_to_cpu(new_vh->data_size);
256 
257 			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
258 				av->vol_id, aeb->lnum, new_aeb->pnum);
259 
260 			aeb->ec = new_aeb->ec;
261 			aeb->pnum = new_aeb->pnum;
262 			aeb->copy_flag = new_vh->copy_flag;
263 			aeb->scrub = new_aeb->scrub;
264 			kmem_cache_free(ai->aeb_slab_cache, new_aeb);
265 
266 		/* new_aeb is older */
267 		} else {
268 			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
269 				av->vol_id, aeb->lnum, new_aeb->pnum);
270 			list_add_tail(&new_aeb->u.list, &ai->erase);
271 		}
272 
273 		return 0;
274 	}
275 	/* This LEB is new, let's add it to the volume */
276 
277 	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
278 		av->highest_lnum = be32_to_cpu(new_vh->lnum);
279 		av->last_data_size = be32_to_cpu(new_vh->data_size);
280 	}
281 
282 	if (av->vol_type == UBI_STATIC_VOLUME)
283 		av->used_ebs = be32_to_cpu(new_vh->used_ebs);
284 
285 	av->leb_count++;
286 
287 	rb_link_node(&new_aeb->u.rb, parent, p);
288 	rb_insert_color(&new_aeb->u.rb, &av->root);
289 
290 	return 0;
291 }
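
/*
 * Note on cmp_res above: ubi_compare_lebs() returns a bit mask where a set
 * bit 0 means the copy in the PEB passed by number (here new_aeb->pnum) is
 * the newer one. A minimal sketch of the resulting decision:
 *
 *	cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
 *	if (cmp_res & 1)
 *		;	// adopt new_aeb, queue the old PEB for erasure
 *	else
 *		;	// keep aeb, queue new_aeb for erasure
 */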
292 
293 /**
294  * process_pool_aeb - process a non-empty PEB found in a pool.
295  * @ubi: UBI device object
296  * @ai: attach info object
297  * @new_vh: the volume header derived from new_aeb
298  * @new_aeb: the AEB to be examined
299  *
300  * Returns 0 on success, < 0 indicates an internal error.
301  */
302 static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
303 			    struct ubi_vid_hdr *new_vh,
304 			    struct ubi_ainf_peb *new_aeb)
305 {
306 	struct ubi_ainf_volume *av, *tmp_av = NULL;
307 	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
308 	int found = 0;
309 
310 	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
311 		be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
312 		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
313 
314 		return 0;
315 	}
316 
317 	/* Find the volume this SEB belongs to */
318 	while (*p) {
319 		parent = *p;
320 		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
321 
322 		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
323 			p = &(*p)->rb_left;
324 		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
325 			p = &(*p)->rb_right;
326 		else {
327 			found = 1;
328 			break;
329 		}
330 	}
331 
332 	if (found)
333 		av = tmp_av;
334 	else {
335 		ubi_err("orphaned volume in fastmap pool!");
336 		return UBI_BAD_FASTMAP;
337 	}
338 
339 	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
340 
341 	return update_vol(ubi, ai, av, new_vh, new_aeb);
342 }
343 
344 /**
345  * unmap_peb - unmap a PEB.
346  * If fastmap detects a free PEB in the pool it has to check whether
347  * this PEB has been unmapped after writing the fastmap.
348  *
349  * @ai: UBI attach info object
350  * @pnum: The PEB to be unmapped
351  */
352 static void unmap_peb(struct ubi_attach_info *ai, int pnum)
353 {
354 	struct ubi_ainf_volume *av;
355 	struct rb_node *node, *node2;
356 	struct ubi_ainf_peb *aeb;
357 
358 	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
359 		av = rb_entry(node, struct ubi_ainf_volume, rb);
360 
361 		for (node2 = rb_first(&av->root); node2;
362 		     node2 = rb_next(node2)) {
363 			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
364 			if (aeb->pnum == pnum) {
365 				rb_erase(&aeb->u.rb, &av->root);
366 				kmem_cache_free(ai->aeb_slab_cache, aeb);
367 				return;
368 			}
369 		}
370 	}
371 }
372 
373 /**
374  * scan_pool - scans a pool for changed (no longer empty) PEBs.
375  * @ubi: UBI device object
376  * @ai: attach info object
377  * @pebs: an array of all PEB numbers in the pool to be scanned
378  * @pool_size: size of the pool (number of entries in @pebs)
379  * @max_sqnum: pointer to the maximal sequence number
380  * @eba_orphans: list of PEBs which need to be scanned
381  * @freef: list of PEBs which are most likely free (and go into @ai->free)
382  *
383  * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
384  * < 0 indicates an internal error.
385  */
386 #ifndef __UBOOT__
387 static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
388 		     int *pebs, int pool_size, unsigned long long *max_sqnum,
389 		     struct list_head *eba_orphans, struct list_head *freef)
390 #else
391 static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
392 		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
393 		     struct list_head *eba_orphans, struct list_head *freef)
394 #endif
395 {
396 	struct ubi_vid_hdr *vh;
397 	struct ubi_ec_hdr *ech;
398 	struct ubi_ainf_peb *new_aeb, *tmp_aeb;
399 	int i, pnum, err, found_orphan, ret = 0;
400 
401 	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
402 	if (!ech)
403 		return -ENOMEM;
404 
405 	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
406 	if (!vh) {
407 		kfree(ech);
408 		return -ENOMEM;
409 	}
410 
411 	dbg_bld("scanning fastmap pool: size = %i", pool_size);
412 
413 	/*
414 	 * Now scan all PEBs in the pool to find changes which have been made
415 	 * after the creation of the fastmap
416 	 */
417 	for (i = 0; i < pool_size; i++) {
418 		int scrub = 0;
419 		int image_seq;
420 
421 		pnum = be32_to_cpu(pebs[i]);
422 
423 		if (ubi_io_is_bad(ubi, pnum)) {
424 			ubi_err("bad PEB in fastmap pool!");
425 			ret = UBI_BAD_FASTMAP;
426 			goto out;
427 		}
428 
429 		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
430 		if (err && err != UBI_IO_BITFLIPS) {
431 			ubi_err("unable to read EC header! PEB:%i err:%i",
432 				pnum, err);
433 			ret = err > 0 ? UBI_BAD_FASTMAP : err;
434 			goto out;
435 		} else if (err == UBI_IO_BITFLIPS)
436 			scrub = 1;
437 
438 		/*
439 		 * Older UBI implementations have image_seq set to zero, so
440 		 * we shouldn't fail if image_seq == 0.
441 		 */
442 		image_seq = be32_to_cpu(ech->image_seq);
443 
444 		if (image_seq && (image_seq != ubi->image_seq)) {
445 			ubi_err("bad image seq: 0x%x, expected: 0x%x",
446 				be32_to_cpu(ech->image_seq), ubi->image_seq);
447 			ret = UBI_BAD_FASTMAP;
448 			goto out;
449 		}
450 
451 		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
452 		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
453 			unsigned long long ec = be64_to_cpu(ech->ec);
454 			unmap_peb(ai, pnum);
455 			dbg_bld("Adding PEB to free: %i", pnum);
456 			if (err == UBI_IO_FF_BITFLIPS)
457 				add_aeb(ai, freef, pnum, ec, 1);
458 			else
459 				add_aeb(ai, freef, pnum, ec, 0);
460 			continue;
461 		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
462 			dbg_bld("Found non empty PEB:%i in pool", pnum);
463 
464 			if (err == UBI_IO_BITFLIPS)
465 				scrub = 1;
466 
467 			found_orphan = 0;
468 			list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
469 				if (tmp_aeb->pnum == pnum) {
470 					found_orphan = 1;
471 					break;
472 				}
473 			}
474 			if (found_orphan) {
475 				list_del(&tmp_aeb->u.list);
476 				kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
477 			}
478 
479 			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
480 						   GFP_KERNEL);
481 			if (!new_aeb) {
482 				ret = -ENOMEM;
483 				goto out;
484 			}
485 
486 			new_aeb->ec = be64_to_cpu(ech->ec);
487 			new_aeb->pnum = pnum;
488 			new_aeb->lnum = be32_to_cpu(vh->lnum);
489 			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
490 			new_aeb->copy_flag = vh->copy_flag;
491 			new_aeb->scrub = scrub;
492 
493 			if (*max_sqnum < new_aeb->sqnum)
494 				*max_sqnum = new_aeb->sqnum;
495 
496 			err = process_pool_aeb(ubi, ai, vh, new_aeb);
497 			if (err) {
498 				ret = err > 0 ? UBI_BAD_FASTMAP : err;
499 				goto out;
500 			}
501 		} else {
502 			/* We are paranoid and fall back to scanning mode */
503 			ubi_err("fastmap pool PEBs contains damaged PEBs!");
504 			ret = err > 0 ? UBI_BAD_FASTMAP : err;
505 			goto out;
506 		}
507 
508 	}
509 
510 out:
511 	ubi_free_vid_hdr(ubi, vh);
512 	kfree(ech);
513 	return ret;
514 }
515 
516 /**
517  * count_fastmap_pebs - Counts the PEBs found by fastmap.
518  * @ai: The UBI attach info object
519  */
520 static int count_fastmap_pebs(struct ubi_attach_info *ai)
521 {
522 	struct ubi_ainf_peb *aeb;
523 	struct ubi_ainf_volume *av;
524 	struct rb_node *rb1, *rb2;
525 	int n = 0;
526 
527 	list_for_each_entry(aeb, &ai->erase, u.list)
528 		n++;
529 
530 	list_for_each_entry(aeb, &ai->free, u.list)
531 		n++;
532 
533 	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
534 		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
535 			n++;
536 
537 	return n;
538 }
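
/*
 * Illustrative invariant (it is checked near the end of
 * ubi_attach_fastmap()): a consistent fastmap accounts for every PEB
 * exactly once, i.e.
 *
 *	count_fastmap_pebs(ai) ==
 *		ubi->peb_count - ai->bad_peb_count - fm->used_blocks;
 */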
539 
540 /**
541  * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
542  * @ubi: UBI device object
543  * @ai: UBI attach info object
544  * @fm: the fastmap to be attached
545  *
546  * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
547  * < 0 indicates an internal error.
548  */
549 static int ubi_attach_fastmap(struct ubi_device *ubi,
550 			      struct ubi_attach_info *ai,
551 			      struct ubi_fastmap_layout *fm)
552 {
553 	struct list_head used, eba_orphans, freef;
554 	struct ubi_ainf_volume *av;
555 	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
556 	struct ubi_ec_hdr *ech;
557 	struct ubi_fm_sb *fmsb;
558 	struct ubi_fm_hdr *fmhdr;
559 	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
560 	struct ubi_fm_ec *fmec;
561 	struct ubi_fm_volhdr *fmvhdr;
562 	struct ubi_fm_eba *fm_eba;
563 	int ret, i, j, pool_size, wl_pool_size;
564 	size_t fm_pos = 0, fm_size = ubi->fm_size;
565 	unsigned long long max_sqnum = 0;
566 	void *fm_raw = ubi->fm_buf;
567 
568 	INIT_LIST_HEAD(&used);
569 	INIT_LIST_HEAD(&freef);
570 	INIT_LIST_HEAD(&eba_orphans);
571 	INIT_LIST_HEAD(&ai->corr);
572 	INIT_LIST_HEAD(&ai->free);
573 	INIT_LIST_HEAD(&ai->erase);
574 	INIT_LIST_HEAD(&ai->alien);
575 	ai->volumes = RB_ROOT;
576 	ai->min_ec = UBI_MAX_ERASECOUNTER;
577 
578 	ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
579 					       sizeof(struct ubi_ainf_peb),
580 					       0, 0, NULL);
581 	if (!ai->aeb_slab_cache) {
582 		ret = -ENOMEM;
583 		goto fail;
584 	}
585 
586 	fmsb = (struct ubi_fm_sb *)(fm_raw);
587 	ai->max_sqnum = fmsb->sqnum;
588 	fm_pos += sizeof(struct ubi_fm_sb);
589 	if (fm_pos >= fm_size)
590 		goto fail_bad;
591 
592 	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
593 	fm_pos += sizeof(*fmhdr);
594 	if (fm_pos >= fm_size)
595 		goto fail_bad;
596 
597 	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
598 		ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
599 			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
600 		goto fail_bad;
601 	}
602 
603 	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
604 	fm_pos += sizeof(*fmpl1);
605 	if (fm_pos >= fm_size)
606 		goto fail_bad;
607 	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
608 		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
609 			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
610 		goto fail_bad;
611 	}
612 
613 	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
614 	fm_pos += sizeof(*fmpl2);
615 	if (fm_pos >= fm_size)
616 		goto fail_bad;
617 	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
618 		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
619 			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
620 		goto fail_bad;
621 	}
622 
623 	pool_size = be16_to_cpu(fmpl1->size);
624 	wl_pool_size = be16_to_cpu(fmpl2->size);
625 	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
626 	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
627 
628 	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
629 		ubi_err("bad pool size: %i", pool_size);
630 		goto fail_bad;
631 	}
632 
633 	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
634 		ubi_err("bad WL pool size: %i", wl_pool_size);
635 		goto fail_bad;
636 	}
637 
638 
639 	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
640 	    fm->max_pool_size < 0) {
641 		ubi_err("bad maximal pool size: %i", fm->max_pool_size);
642 		goto fail_bad;
643 	}
644 
645 	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
646 	    fm->max_wl_pool_size < 0) {
647 		ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
648 		goto fail_bad;
649 	}
650 
651 	/* read EC values from free list */
652 	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
653 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
654 		fm_pos += sizeof(*fmec);
655 		if (fm_pos >= fm_size)
656 			goto fail_bad;
657 
658 		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
659 			be32_to_cpu(fmec->ec), 0);
660 	}
661 
662 	/* read EC values from used list */
663 	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
664 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
665 		fm_pos += sizeof(*fmec);
666 		if (fm_pos >= fm_size)
667 			goto fail_bad;
668 
669 		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
670 			be32_to_cpu(fmec->ec), 0);
671 	}
672 
673 	/* read EC values from scrub list */
674 	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
675 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
676 		fm_pos += sizeof(*fmec);
677 		if (fm_pos >= fm_size)
678 			goto fail_bad;
679 
680 		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
681 			be32_to_cpu(fmec->ec), 1);
682 	}
683 
684 	/* read EC values from erase list */
685 	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
686 		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
687 		fm_pos += sizeof(*fmec);
688 		if (fm_pos >= fm_size)
689 			goto fail_bad;
690 
691 		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
692 			be32_to_cpu(fmec->ec), 1);
693 	}
694 
695 	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
696 	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
697 
698 	/* Iterate over all volumes and read their EBA table */
699 	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
700 		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
701 		fm_pos += sizeof(*fmvhdr);
702 		if (fm_pos >= fm_size)
703 			goto fail_bad;
704 
705 		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
706 			ubi_err("bad fastmap vol header magic: 0x%x, " \
707 				"expected: 0x%x",
708 				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
709 			goto fail_bad;
710 		}
711 
712 		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
713 			     be32_to_cpu(fmvhdr->used_ebs),
714 			     be32_to_cpu(fmvhdr->data_pad),
715 			     fmvhdr->vol_type,
716 			     be32_to_cpu(fmvhdr->last_eb_bytes));
717 
718 		if (!av)
719 			goto fail_bad;
720 
721 		ai->vols_found++;
722 		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
723 			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
724 
725 		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
726 		fm_pos += sizeof(*fm_eba);
727 		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
728 		if (fm_pos >= fm_size)
729 			goto fail_bad;
730 
731 		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
732 			ubi_err("bad fastmap EBA header magic: 0x%x, " \
733 				"expected: 0x%x",
734 				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
735 			goto fail_bad;
736 		}
737 
738 		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
739 			int pnum = be32_to_cpu(fm_eba->pnum[j]);
740 
741 			if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
742 				continue;
743 
744 			aeb = NULL;
745 			list_for_each_entry(tmp_aeb, &used, u.list) {
746 				if (tmp_aeb->pnum == pnum) {
747 					aeb = tmp_aeb;
748 					break;
749 				}
750 			}
751 
752 			/* This can happen if a PEB is already in an EBA known
753 			 * by this fastmap but the PEB itself is not in the used
754 			 * list.
755 			 * In this case the PEB can be within the fastmap pool
756 			 * or while writing the fastmap it was in the protection
757 			 * queue.
758 			 */
759 			if (!aeb) {
760 				aeb = kmem_cache_alloc(ai->aeb_slab_cache,
761 						       GFP_KERNEL);
762 				if (!aeb) {
763 					ret = -ENOMEM;
764 
765 					goto fail;
766 				}
767 
768 				aeb->lnum = j;
769 				aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
770 				aeb->ec = -1;
771 				aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
772 				list_add_tail(&aeb->u.list, &eba_orphans);
773 				continue;
774 			}
775 
776 			aeb->lnum = j;
777 
778 			if (av->highest_lnum <= aeb->lnum)
779 				av->highest_lnum = aeb->lnum;
780 
781 			assign_aeb_to_av(ai, aeb, av);
782 
783 			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
784 				aeb->pnum, aeb->lnum, av->vol_id);
785 		}
786 
787 		ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
788 		if (!ech) {
789 			ret = -ENOMEM;
790 			goto fail;
791 		}
792 
793 		list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
794 					 u.list) {
795 			int err;
796 
797 			if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
798 				ubi_err("bad PEB in fastmap EBA orphan list");
799 				ret = UBI_BAD_FASTMAP;
800 				kfree(ech);
801 				goto fail;
802 			}
803 
804 			err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
805 			if (err && err != UBI_IO_BITFLIPS) {
806 				ubi_err("unable to read EC header! PEB:%i " \
807 					"err:%i", tmp_aeb->pnum, err);
808 				ret = err > 0 ? UBI_BAD_FASTMAP : err;
809 				kfree(ech);
810 
811 				goto fail;
812 			} else if (err == UBI_IO_BITFLIPS)
813 				tmp_aeb->scrub = 1;
814 
815 			tmp_aeb->ec = be64_to_cpu(ech->ec);
816 			assign_aeb_to_av(ai, tmp_aeb, av);
817 		}
818 
819 		kfree(ech);
820 	}
821 
822 	ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
823 			&eba_orphans, &freef);
824 	if (ret)
825 		goto fail;
826 
827 	ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
828 			&eba_orphans, &freef);
829 	if (ret)
830 		goto fail;
831 
832 	if (max_sqnum > ai->max_sqnum)
833 		ai->max_sqnum = max_sqnum;
834 
835 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &freef, u.list)
836 		list_move_tail(&tmp_aeb->u.list, &ai->free);
837 
838 	ubi_assert(list_empty(&used));
839 	ubi_assert(list_empty(&eba_orphans));
840 	ubi_assert(list_empty(&freef));
841 
842 	/*
843 	 * If fastmap is leaking PEBs (must not happen), raise a
844 	 * fat warning and fall back to scanning mode.
845 	 * We do this here because in ubi_wl_init() it's too late
846 	 * and we cannot fall back to scanning.
847 	 */
848 #ifndef __UBOOT__
849 	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
850 		    ai->bad_peb_count - fm->used_blocks))
851 		goto fail_bad;
852 #else
853 	if (count_fastmap_pebs(ai) != ubi->peb_count -
854 		    ai->bad_peb_count - fm->used_blocks) {
855 		WARN_ON(1);
856 		goto fail_bad;
857 	}
858 #endif
859 
860 	return 0;
861 
862 fail_bad:
863 	ret = UBI_BAD_FASTMAP;
864 fail:
865 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
866 		list_del(&tmp_aeb->u.list);
867 		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
868 	}
869 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
870 		list_del(&tmp_aeb->u.list);
871 		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
872 	}
873 	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &freef, u.list) {
874 		list_del(&tmp_aeb->u.list);
875 		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
876 	}
877 
878 	return ret;
879 }
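
/*
 * On-flash layout parsed above (illustrative summary, offsets relative to
 * fm_raw):
 *
 *	struct ubi_fm_sb	sb;		// superblock
 *	struct ubi_fm_hdr	hdr;		// free/used/scrub/erase counts
 *	struct ubi_fm_scan_pool	pool, wl_pool;	// fmpl1, fmpl2
 *	struct ubi_fm_ec	ecs[];		// free, used, scrub, erase
 *	// then per volume: ubi_fm_volhdr + ubi_fm_eba with reserved_pebs
 *	// __be32 entries
 */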
880 
881 /**
882  * ubi_scan_fastmap - scan the fastmap.
883  * @ubi: UBI device object
884  * @ai: UBI attach info to be filled
885  * @fm_anchor: The fastmap starts at this PEB
886  *
887  * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
888  * UBI_BAD_FASTMAP if one was found but is not usable.
889  * < 0 indicates an internal error.
890  */
891 int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
892 		     int fm_anchor)
893 {
894 	struct ubi_fm_sb *fmsb, *fmsb2;
895 	struct ubi_vid_hdr *vh;
896 	struct ubi_ec_hdr *ech;
897 	struct ubi_fastmap_layout *fm;
898 	int i, used_blocks, pnum, ret = 0;
899 	size_t fm_size;
900 	__be32 crc, tmp_crc;
901 	unsigned long long sqnum = 0;
902 
903 	mutex_lock(&ubi->fm_mutex);
904 	memset(ubi->fm_buf, 0, ubi->fm_size);
905 
906 	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
907 	if (!fmsb) {
908 		ret = -ENOMEM;
909 		goto out;
910 	}
911 
912 	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
913 	if (!fm) {
914 		ret = -ENOMEM;
915 		kfree(fmsb);
916 		goto out;
917 	}
918 
919 	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
920 	if (ret && ret != UBI_IO_BITFLIPS)
921 		goto free_fm_sb;
922 	else if (ret == UBI_IO_BITFLIPS)
923 		fm->to_be_tortured[0] = 1;
924 
925 	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
926 		ubi_err("bad super block magic: 0x%x, expected: 0x%x",
927 			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
928 		ret = UBI_BAD_FASTMAP;
929 		goto free_fm_sb;
930 	}
931 
932 	if (fmsb->version != UBI_FM_FMT_VERSION) {
933 		ubi_err("bad fastmap version: %i, expected: %i",
934 			fmsb->version, UBI_FM_FMT_VERSION);
935 		ret = UBI_BAD_FASTMAP;
936 		goto free_fm_sb;
937 	}
938 
939 	used_blocks = be32_to_cpu(fmsb->used_blocks);
940 	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
941 		ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
942 		ret = UBI_BAD_FASTMAP;
943 		goto free_fm_sb;
944 	}
945 
946 	fm_size = ubi->leb_size * used_blocks;
947 	if (fm_size != ubi->fm_size) {
948 		ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
949 			ubi->fm_size);
950 		ret = UBI_BAD_FASTMAP;
951 		goto free_fm_sb;
952 	}
953 
954 	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
955 	if (!ech) {
956 		ret = -ENOMEM;
957 		goto free_fm_sb;
958 	}
959 
960 	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
961 	if (!vh) {
962 		ret = -ENOMEM;
963 		goto free_hdr;
964 	}
965 
966 	for (i = 0; i < used_blocks; i++) {
967 		int image_seq;
968 
969 		pnum = be32_to_cpu(fmsb->block_loc[i]);
970 
971 		if (ubi_io_is_bad(ubi, pnum)) {
972 			ret = UBI_BAD_FASTMAP;
973 			goto free_hdr;
974 		}
975 
976 		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
977 		if (ret && ret != UBI_IO_BITFLIPS) {
978 			ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
979 				i, pnum);
980 			if (ret > 0)
981 				ret = UBI_BAD_FASTMAP;
982 			goto free_hdr;
983 		} else if (ret == UBI_IO_BITFLIPS)
984 			fm->to_be_tortured[i] = 1;
985 
986 		image_seq = be32_to_cpu(ech->image_seq);
987 		if (!ubi->image_seq)
988 			ubi->image_seq = image_seq;
989 
990 		/*
991 		 * Older UBI implementations have image_seq set to zero, so
992 		 * we shouldn't fail if image_seq == 0.
993 		 */
994 		if (image_seq && (image_seq != ubi->image_seq)) {
995 			ubi_err("wrong image seq:%d instead of %d",
996 				be32_to_cpu(ech->image_seq), ubi->image_seq);
997 			ret = UBI_BAD_FASTMAP;
998 			goto free_hdr;
999 		}
1000 
1001 		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
1002 		if (ret && ret != UBI_IO_BITFLIPS) {
1003 			ubi_err("unable to read fastmap block# %i (PEB: %i)",
1004 				i, pnum);
1005 			goto free_hdr;
1006 		}
1007 
1008 		if (i == 0) {
1009 			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
1010 				ubi_err("bad fastmap anchor vol_id: 0x%x," \
1011 					" expected: 0x%x",
1012 					be32_to_cpu(vh->vol_id),
1013 					UBI_FM_SB_VOLUME_ID);
1014 				ret = UBI_BAD_FASTMAP;
1015 				goto free_hdr;
1016 			}
1017 		} else {
1018 			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
1019 				ubi_err("bad fastmap data vol_id: 0x%x," \
1020 					" expected: 0x%x",
1021 					be32_to_cpu(vh->vol_id),
1022 					UBI_FM_DATA_VOLUME_ID);
1023 				ret = UBI_BAD_FASTMAP;
1024 				goto free_hdr;
1025 			}
1026 		}
1027 
1028 		if (sqnum < be64_to_cpu(vh->sqnum))
1029 			sqnum = be64_to_cpu(vh->sqnum);
1030 
1031 		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
1032 				  ubi->leb_start, ubi->leb_size);
1033 		if (ret && ret != UBI_IO_BITFLIPS) {
1034 			ubi_err("unable to read fastmap block# %i (PEB: %i, " \
1035 				"err: %i)", i, pnum, ret);
1036 			goto free_hdr;
1037 		}
1038 	}
1039 
1040 	kfree(fmsb);
1041 	fmsb = NULL;
1042 
1043 	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
1044 	tmp_crc = be32_to_cpu(fmsb2->data_crc);
1045 	fmsb2->data_crc = 0;
1046 	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
1047 	if (crc != tmp_crc) {
1048 		ubi_err("fastmap data CRC is invalid");
1049 		ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
1050 		ret = UBI_BAD_FASTMAP;
1051 		goto free_hdr;
1052 	}
1053 
1054 	fmsb2->sqnum = sqnum;
1055 
1056 	fm->used_blocks = used_blocks;
1057 
1058 	ret = ubi_attach_fastmap(ubi, ai, fm);
1059 	if (ret) {
1060 		if (ret > 0)
1061 			ret = UBI_BAD_FASTMAP;
1062 		goto free_hdr;
1063 	}
1064 
1065 	for (i = 0; i < used_blocks; i++) {
1066 		struct ubi_wl_entry *e;
1067 
1068 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1069 		if (!e) {
1070 			while (i--)
1071 				kfree(fm->e[i]);
1072 
1073 			ret = -ENOMEM;
1074 			goto free_hdr;
1075 		}
1076 
1077 		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1078 		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1079 		fm->e[i] = e;
1080 	}
1081 
1082 	ubi->fm = fm;
1083 	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1084 	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1085 	ubi_msg("attached by fastmap");
1086 	ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
1087 	ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
1088 	ubi->fm_disabled = 0;
1089 
1090 	ubi_free_vid_hdr(ubi, vh);
1091 	kfree(ech);
1092 out:
1093 	mutex_unlock(&ubi->fm_mutex);
1094 	if (ret == UBI_BAD_FASTMAP)
1095 		ubi_err("Attach by fastmap failed, doing a full scan!");
1096 	return ret;
1097 
1098 free_hdr:
1099 	ubi_free_vid_hdr(ubi, vh);
1100 	kfree(ech);
1101 free_fm_sb:
1102 	kfree(fmsb);
1103 	kfree(fm);
1104 	goto out;
1105 }
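
/*
 * Minimal caller sketch (hypothetical; the real call site lives in the
 * attach code): attach via fastmap and fall back to a full scan on
 * failure, as the error message above suggests:
 *
 *	err = ubi_scan_fastmap(ubi, ai, fm_anchor);
 *	if (err == UBI_NO_FASTMAP || err == UBI_BAD_FASTMAP)
 *		err = full_scan(ubi, ai);	// hypothetical helper
 */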
1106 
1107 /**
1108  * ubi_write_fastmap - writes a fastmap.
1109  * @ubi: UBI device object
1110  * @new_fm: the fastmap to be written
1111  *
1112  * Returns 0 on success, < 0 indicates an internal error.
1113  */
1114 static int ubi_write_fastmap(struct ubi_device *ubi,
1115 			     struct ubi_fastmap_layout *new_fm)
1116 {
1117 	size_t fm_pos = 0;
1118 	void *fm_raw;
1119 	struct ubi_fm_sb *fmsb;
1120 	struct ubi_fm_hdr *fmh;
1121 	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
1122 	struct ubi_fm_ec *fec;
1123 	struct ubi_fm_volhdr *fvh;
1124 	struct ubi_fm_eba *feba;
1125 	struct rb_node *node;
1126 	struct ubi_wl_entry *wl_e;
1127 	struct ubi_volume *vol;
1128 	struct ubi_vid_hdr *avhdr, *dvhdr;
1129 	struct ubi_work *ubi_wrk;
1130 	int ret, i, j, free_peb_count, used_peb_count, vol_count;
1131 	int scrub_peb_count, erase_peb_count;
1132 
1133 	fm_raw = ubi->fm_buf;
1134 	memset(ubi->fm_buf, 0, ubi->fm_size);
1135 
1136 	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1137 	if (!avhdr) {
1138 		ret = -ENOMEM;
1139 		goto out;
1140 	}
1141 
1142 	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
1143 	if (!dvhdr) {
1144 		ret = -ENOMEM;
1145 		goto out_kfree;
1146 	}
1147 
1148 	spin_lock(&ubi->volumes_lock);
1149 	spin_lock(&ubi->wl_lock);
1150 
1151 	fmsb = (struct ubi_fm_sb *)fm_raw;
1152 	fm_pos += sizeof(*fmsb);
1153 	ubi_assert(fm_pos <= ubi->fm_size);
1154 
1155 	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1156 	fm_pos += sizeof(*fmh);
1157 	ubi_assert(fm_pos <= ubi->fm_size);
1158 
1159 	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1160 	fmsb->version = UBI_FM_FMT_VERSION;
1161 	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1162 	/* the max sqnum will be filled in while *reading* the fastmap */
1163 	fmsb->sqnum = 0;
1164 
1165 	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1166 	free_peb_count = 0;
1167 	used_peb_count = 0;
1168 	scrub_peb_count = 0;
1169 	erase_peb_count = 0;
1170 	vol_count = 0;
1171 
1172 	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1173 	fm_pos += sizeof(*fmpl1);
1174 	fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1175 	fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
1176 	fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1177 
1178 	for (i = 0; i < ubi->fm_pool.size; i++)
1179 		fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1180 
1181 	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1182 	fm_pos += sizeof(*fmpl2);
1183 	fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1184 	fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
1185 	fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1186 
1187 	for (i = 0; i < ubi->fm_wl_pool.size; i++)
1188 		fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1189 
1190 	for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
1191 		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1192 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1193 
1194 		fec->pnum = cpu_to_be32(wl_e->pnum);
1195 		fec->ec = cpu_to_be32(wl_e->ec);
1196 
1197 		free_peb_count++;
1198 		fm_pos += sizeof(*fec);
1199 		ubi_assert(fm_pos <= ubi->fm_size);
1200 	}
1201 	fmh->free_peb_count = cpu_to_be32(free_peb_count);
1202 
1203 	for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
1204 		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1205 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1206 
1207 		fec->pnum = cpu_to_be32(wl_e->pnum);
1208 		fec->ec = cpu_to_be32(wl_e->ec);
1209 
1210 		used_peb_count++;
1211 		fm_pos += sizeof(*fec);
1212 		ubi_assert(fm_pos <= ubi->fm_size);
1213 	}
1214 	fmh->used_peb_count = cpu_to_be32(used_peb_count);
1215 
1216 	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
1217 		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1218 		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1219 
1220 		fec->pnum = cpu_to_be32(wl_e->pnum);
1221 		fec->ec = cpu_to_be32(wl_e->ec);
1222 
1223 		scrub_peb_count++;
1224 		fm_pos += sizeof(*fec);
1225 		ubi_assert(fm_pos <= ubi->fm_size);
1226 	}
1227 	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1228 
1229 
1230 	list_for_each_entry(ubi_wrk, &ubi->works, list) {
1231 		if (ubi_is_erase_work(ubi_wrk)) {
1232 			wl_e = ubi_wrk->e;
1233 			ubi_assert(wl_e);
1234 
1235 			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1236 
1237 			fec->pnum = cpu_to_be32(wl_e->pnum);
1238 			fec->ec = cpu_to_be32(wl_e->ec);
1239 
1240 			erase_peb_count++;
1241 			fm_pos += sizeof(*fec);
1242 			ubi_assert(fm_pos <= ubi->fm_size);
1243 		}
1244 	}
1245 	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1246 
1247 	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1248 		vol = ubi->volumes[i];
1249 
1250 		if (!vol)
1251 			continue;
1252 
1253 		vol_count++;
1254 
1255 		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1256 		fm_pos += sizeof(*fvh);
1257 		ubi_assert(fm_pos <= ubi->fm_size);
1258 
1259 		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1260 		fvh->vol_id = cpu_to_be32(vol->vol_id);
1261 		fvh->vol_type = vol->vol_type;
1262 		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1263 		fvh->data_pad = cpu_to_be32(vol->data_pad);
1264 		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1265 
1266 		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1267 			vol->vol_type == UBI_STATIC_VOLUME);
1268 
1269 		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1270 		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1271 		ubi_assert(fm_pos <= ubi->fm_size);
1272 
1273 		for (j = 0; j < vol->reserved_pebs; j++)
1274 			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
1275 
1276 		feba->reserved_pebs = cpu_to_be32(j);
1277 		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1278 	}
1279 	fmh->vol_count = cpu_to_be32(vol_count);
1280 	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1281 
1282 	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1283 	avhdr->lnum = 0;
1284 
1285 	spin_unlock(&ubi->wl_lock);
1286 	spin_unlock(&ubi->volumes_lock);
1287 
1288 	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1289 	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
1290 	if (ret) {
1291 		ubi_err("unable to write vid_hdr to fastmap SB!");
1292 		goto out_kfree;
1293 	}
1294 
1295 	for (i = 0; i < new_fm->used_blocks; i++) {
1296 		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1297 		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1298 	}
1299 
1300 	fmsb->data_crc = 0;
1301 	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1302 					   ubi->fm_size));
1303 
1304 	for (i = 1; i < new_fm->used_blocks; i++) {
1305 		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1306 		dvhdr->lnum = cpu_to_be32(i);
1307 		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1308 			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1309 		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
1310 		if (ret) {
1311 			ubi_err("unable to write vid_hdr to PEB %i!",
1312 				new_fm->e[i]->pnum);
1313 			goto out_kfree;
1314 		}
1315 	}
1316 
1317 	for (i = 0; i < new_fm->used_blocks; i++) {
1318 		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
1319 			new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
1320 		if (ret) {
1321 			ubi_err("unable to write fastmap to PEB %i!",
1322 				new_fm->e[i]->pnum);
1323 			goto out_kfree;
1324 		}
1325 	}
1326 
1327 	ubi_assert(new_fm);
1328 	ubi->fm = new_fm;
1329 
1330 	dbg_bld("fastmap written!");
1331 
1332 out_kfree:
1333 	ubi_free_vid_hdr(ubi, avhdr);
1334 	ubi_free_vid_hdr(ubi, dvhdr);
1335 out:
1336 	return ret;
1337 }
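
/*
 * Write order used above (sketch): the anchor PEB's VID header goes out
 * first, the CRC is computed over the complete buffer, then the data
 * PEBs' VID headers follow, and finally the payload of every block:
 *
 *	ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
 *	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
 *					   ubi->fm_size));
 *	// dvhdr for e[1]..e[n-1], then ubi_io_write() for e[0]..e[n-1]
 */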
1338 
1339 /**
1340  * erase_block - Manually erase a PEB.
1341  * @ubi: UBI device object
1342  * @pnum: PEB to be erased
1343  *
1344  * Returns the new EC value on success, < 0 indicates an internal error.
1345  */
1346 static int erase_block(struct ubi_device *ubi, int pnum)
1347 {
1348 	int ret;
1349 	struct ubi_ec_hdr *ec_hdr;
1350 	long long ec;
1351 
1352 	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1353 	if (!ec_hdr)
1354 		return -ENOMEM;
1355 
1356 	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1357 	if (ret < 0)
1358 		goto out;
1359 	else if (ret && ret != UBI_IO_BITFLIPS) {
1360 		ret = -EINVAL;
1361 		goto out;
1362 	}
1363 
1364 	ret = ubi_io_sync_erase(ubi, pnum, 0);
1365 	if (ret < 0)
1366 		goto out;
1367 
1368 	ec = be64_to_cpu(ec_hdr->ec);
1369 	ec += ret;
1370 	if (ec > UBI_MAX_ERASECOUNTER) {
1371 		ret = -EINVAL;
1372 		goto out;
1373 	}
1374 
1375 	ec_hdr->ec = cpu_to_be64(ec);
1376 	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1377 	if (ret < 0)
1378 		goto out;
1379 
1380 	ret = ec;
1381 out:
1382 	kfree(ec_hdr);
1383 	return ret;
1384 }
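
/*
 * Usage sketch (illustrative): erase_block() returns the incremented erase
 * counter on success, so callers can recycle the PEB in place:
 *
 *	ret = erase_block(ubi, pnum);
 *	if (ret < 0)
 *		return ret;		// I/O error or bad EC header
 *	e->ec = ret;			// as done in ubi_update_fastmap()
 */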
1385 
1386 /**
1387  * invalidate_fastmap - destroys a fastmap.
1388  * @ubi: UBI device object
1389  * @fm: the fastmap to be destroyed
1390  *
1391  * Returns 0 on success, < 0 indicates an internal error.
1392  */
1393 static int invalidate_fastmap(struct ubi_device *ubi,
1394 			      struct ubi_fastmap_layout *fm)
1395 {
1396 	int ret;
1397 	struct ubi_vid_hdr *vh;
1398 
1399 	ret = erase_block(ubi, fm->e[0]->pnum);
1400 	if (ret < 0)
1401 		return ret;
1402 
1403 	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1404 	if (!vh)
1405 		return -ENOMEM;
1406 
1407 	/* deleting the current fastmap SB is not enough, an old SB may exist,
1408 	 * so create a (corrupted) SB such that fastmap will find it and fall
1409 	 * back to scanning mode in any case */
1410 	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1411 	ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
1412 
1413 	return ret;
1414 }
1415 
1416 /**
1417  * ubi_update_fastmap - will be called by UBI if a volume changes or
1418  * a fastmap pool becomes full.
1419  * @ubi: UBI device object
1420  *
1421  * Returns 0 on success, < 0 indicates an internal error.
1422  */
1423 int ubi_update_fastmap(struct ubi_device *ubi)
1424 {
1425 	int ret, i;
1426 	struct ubi_fastmap_layout *new_fm, *old_fm;
1427 	struct ubi_wl_entry *tmp_e;
1428 
1429 	mutex_lock(&ubi->fm_mutex);
1430 
1431 	ubi_refill_pools(ubi);
1432 
1433 	if (ubi->ro_mode || ubi->fm_disabled) {
1434 		mutex_unlock(&ubi->fm_mutex);
1435 		return 0;
1436 	}
1437 
1438 	ret = ubi_ensure_anchor_pebs(ubi);
1439 	if (ret) {
1440 		mutex_unlock(&ubi->fm_mutex);
1441 		return ret;
1442 	}
1443 
1444 	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1445 	if (!new_fm) {
1446 		mutex_unlock(&ubi->fm_mutex);
1447 		return -ENOMEM;
1448 	}
1449 
1450 	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1451 
1452 	for (i = 0; i < new_fm->used_blocks; i++) {
1453 		new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1454 		if (!new_fm->e[i]) {
1455 			while (i--)
1456 				kfree(new_fm->e[i]);
1457 
1458 			kfree(new_fm);
1459 			mutex_unlock(&ubi->fm_mutex);
1460 			return -ENOMEM;
1461 		}
1462 	}
1463 
1464 	old_fm = ubi->fm;
1465 	ubi->fm = NULL;
1466 
1467 	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1468 		ubi_err("fastmap too large");
1469 		ret = -ENOSPC;
1470 		goto err;
1471 	}
1472 
1473 	for (i = 1; i < new_fm->used_blocks; i++) {
1474 		spin_lock(&ubi->wl_lock);
1475 		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1476 		spin_unlock(&ubi->wl_lock);
1477 
1478 		if (!tmp_e && !old_fm) {
1479 			int j;
1480 			ubi_err("could not get any free erase block");
1481 
1482 			for (j = 1; j < i; j++)
1483 				ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1484 
1485 			ret = -ENOSPC;
1486 			goto err;
1487 		} else if (!tmp_e && old_fm) {
1488 			ret = erase_block(ubi, old_fm->e[i]->pnum);
1489 			if (ret < 0) {
1490 				int j;
1491 
1492 				for (j = 1; j < i; j++)
1493 					ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1494 							  j, 0);
1495 
1496 				ubi_err("could not erase old fastmap PEB");
1497 				goto err;
1498 			}
1499 
1500 			new_fm->e[i]->pnum = old_fm->e[i]->pnum;
1501 			new_fm->e[i]->ec = old_fm->e[i]->ec;
1502 		} else {
1503 			new_fm->e[i]->pnum = tmp_e->pnum;
1504 			new_fm->e[i]->ec = tmp_e->ec;
1505 
1506 			if (old_fm)
1507 				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1508 						  old_fm->to_be_tortured[i]);
1509 		}
1510 	}
1511 
1512 	spin_lock(&ubi->wl_lock);
1513 	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
1514 	spin_unlock(&ubi->wl_lock);
1515 
1516 	if (old_fm) {
1517 		/* no fresh anchor PEB was found, reuse the old one */
1518 		if (!tmp_e) {
1519 			ret = erase_block(ubi, old_fm->e[0]->pnum);
1520 			if (ret < 0) {
1521 				int i;
1522 				ubi_err("could not erase old anchor PEB");
1523 
1524 				for (i = 1; i < new_fm->used_blocks; i++)
1525 					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1526 							  i, 0);
1527 				goto err;
1528 			}
1529 
1530 			new_fm->e[0]->pnum = old_fm->e[0]->pnum;
1531 			new_fm->e[0]->ec = ret;
1532 		} else {
1533 			/* we've got a new anchor PEB, return the old one */
1534 			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1535 					  old_fm->to_be_tortured[0]);
1536 
1537 			new_fm->e[0]->pnum = tmp_e->pnum;
1538 			new_fm->e[0]->ec = tmp_e->ec;
1539 		}
1540 	} else {
1541 		if (!tmp_e) {
1542 			int i;
1543 			ubi_err("could not find any anchor PEB");
1544 
1545 			for (i = 1; i < new_fm->used_blocks; i++)
1546 				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1547 
1548 			ret = -ENOSPC;
1549 			goto err;
1550 		}
1551 
1552 		new_fm->e[0]->pnum = tmp_e->pnum;
1553 		new_fm->e[0]->ec = tmp_e->ec;
1554 	}
1555 
1556 	down_write(&ubi->work_sem);
1557 	down_write(&ubi->fm_sem);
1558 	ret = ubi_write_fastmap(ubi, new_fm);
1559 	up_write(&ubi->fm_sem);
1560 	up_write(&ubi->work_sem);
1561 
1562 	if (ret)
1563 		goto err;
1564 
1565 out_unlock:
1566 	mutex_unlock(&ubi->fm_mutex);
1567 	kfree(old_fm);
1568 	return ret;
1569 
1570 err:
1571 	kfree(new_fm);
1572 
1573 	ubi_warn("Unable to write new fastmap, err=%i", ret);
1574 
1575 	ret = 0;
1576 	if (old_fm) {
1577 		ret = invalidate_fastmap(ubi, old_fm);
1578 		if (ret < 0)
1579 			ubi_err("Unable to invalidiate current fastmap!");
1580 		else if (ret)
1581 			ret = 0;
1582 	}
1583 	goto out_unlock;
1584 }
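
/*
 * Illustrative trigger (assumed call-site shape, matching the kernel-doc
 * above): the wear-leveling code invokes ubi_update_fastmap() when a pool
 * runs dry, e.g.
 *
 *	if (pool->used == pool->size)	// 'pool' is an assumed ubi_fm_pool
 *		ubi_update_fastmap(ubi);
 */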
1585