xref: /openbmc/linux/drivers/mtd/ubi/fastmap.c (revision 4a44a19b)
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 */

#include <linux/crc32.h>
#include "ubi.h"

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for a UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
		sizeof(struct ubi_fm_hdr) +
		sizeof(struct ubi_fm_scan_pool) +
		sizeof(struct ubi_fm_scan_pool) +
		(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
		(sizeof(struct ubi_fm_eba) +
		(ubi->peb_count * sizeof(__be32))) +
		sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}
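
/*
 * Worked example (illustrative numbers only, not taken from any real
 * device): with peb_count = 1024 the raw size is
 *
 *	sizeof(struct ubi_fm_sb) + sizeof(struct ubi_fm_hdr)
 *	+ 2 * sizeof(struct ubi_fm_scan_pool)
 *	+ 1024 * sizeof(struct ubi_fm_ec)	(one EC entry per PEB)
 *	+ sizeof(struct ubi_fm_eba) + 1024 * 4	(one __be32 EBA slot per PEB)
 *	+ UBI_MAX_VOLUMES * sizeof(struct ubi_fm_volhdr)
 *
 * The total grows linearly with peb_count; roundup() then pads it to a
 * whole number of LEBs because the fastmap is read and written LEB-wise
 * (see ubi_scan_fastmap() and ubi_write_fastmap() below).
 */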

/**
 * new_fm_vhdr - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_hdr *new;

	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	new->vol_type = UBI_VID_DYNAMIC;
	new->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	new->compat = UBI_COMPAT_DELETE;

out:
	return new;
}
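
/*
 * Usage sketch: both internal fastmap volumes are created through this
 * helper, see ubi_write_fastmap() below:
 *
 *	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
 *	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
 *
 * UBI_COMPAT_DELETE makes a fastmap-unaware UBI implementation delete
 * these internal volumes instead of refusing to attach the image.
 */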

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new PEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->pnum = pnum;
	aeb->ec = ec;
	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}
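
/*
 * Usage sketch: the EC lists parsed in ubi_attach_fastmap() below all go
 * through this helper, e.g. for the free list:
 *
 *	add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
 *		be32_to_cpu(fmec->ec), 0);
 *
 * Besides queueing the PEB it keeps the ec_sum/ec_count and min_ec/max_ec
 * statistics of @ai up to date.
 */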

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates an error.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
	if (!av)
		goto out;

	av->highest_lnum = av->leb_count = 0;
	av->vol_id = vol_id;
	av->used_ebs = used_ebs;
	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	av->root = RB_ROOT;

	dbg_bld("found volume (ID %i)", vol_id);

	rb_link_node(&av->rb, parent, p);
	rb_insert_color(&av->rb, &ai->volumes);

out:
	return av;
}
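
/*
 * Note: the descent above keeps larger vol_ids to the *left*, the mirror
 * image of the usual rb-tree idiom. This is harmless as long as every
 * walker of ai->volumes uses the same ordering; the lookup in
 * process_pool_aeb() below does.
 */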

/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the SEB to be assigned
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &av->root.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}

/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = kmem_cache_alloc(ai->aeb_slab_cache,
				GFP_KERNEL);
			if (!victim)
				return -ENOMEM;

			victim->ec = aeb->ec;
			victim->pnum = aeb->pnum;
			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}
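
/*
 * Summary of the cases handled above when a pool PEB carries a LEB that
 * may already be known:
 *
 *	same PEB number again	-> duplicate entry, free new_aeb, done
 *	new_aeb is newer	-> take over the entry, queue the old PEB
 *				   for erasure
 *	new_aeb is older	-> queue new_aeb's PEB for erasure
 *	LEB not in the tree	-> link new_aeb as a fresh leaf
 */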

/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	struct ubi_ainf_volume *av, *tmp_av = NULL;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
	int found = 0;

	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
		be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	while (*p) {
		parent = *p;
		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
			p = &(*p)->rb_left;
		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
			p = &(*p)->rb_right;
		else {
			found = 1;
			break;
		}
	}

	if (found)
		av = tmp_av;
	else {
		ubi_err("orphaned volume in fastmap pool!");
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 *
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
		av = rb_entry(node, struct ubi_ainf_volume, rb);

		for (node2 = rb_first(&av->root); node2;
		     node2 = rb_next(node2)) {
			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				kmem_cache_free(ai->aeb_slab_cache, aeb);
				return;
			}
		}
	}
}

/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the pool to be scanned
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @eba_orphans: list of PEBs which need to be scanned
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *eba_orphans, struct list_head *free)
{
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb, *tmp_aeb;
	int i, pnum, err, found_orphan, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		kfree(ech);
		return -ENOMEM;
	}

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err("bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err("unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err("bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);
			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);
			if (err == UBI_IO_FF_BITFLIPS)
				add_aeb(ai, free, pnum, ec, 1);
			else
				add_aeb(ai, free, pnum, ec, 0);
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non-empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			found_orphan = 0;
			list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
				if (tmp_aeb->pnum == pnum) {
					found_orphan = 1;
					break;
				}
			}
			if (found_orphan) {
				list_del(&tmp_aeb->u.list);
				kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
			}

			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
						   GFP_KERNEL);
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->ec = be64_to_cpu(ech->ec);
			new_aeb->pnum = pnum;
			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err("fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}

	}

out:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
	return ret;
}
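
/*
 * Each pool PEB scanned above ends in one of three states:
 *
 *	VID header erased (0xFF) -> the PEB was unmapped after the fastmap
 *				    was written; it goes to the free list
 *	VID header readable	 -> the LEB changed after the fastmap was
 *				    written; merge it via process_pool_aeb()
 *	any other read result	 -> distrust the pool and fall back to a
 *				    full scan (UBI_BAD_FASTMAP)
 */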

/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}
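
/*
 * This count feeds the sanity check at the end of ubi_attach_fastmap()
 * below, which requires
 *
 *	count_fastmap_pebs(ai) == ubi->peb_count - ai->bad_peb_count
 *				  - fm->used_blocks
 *
 * i.e. every non-bad PEB not occupied by the fastmap itself must be
 * accounted for, otherwise UBI falls back to a full scan.
 */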

/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, eba_orphans, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_ec_hdr *ech;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	INIT_LIST_HEAD(&eba_orphans);
	INIT_LIST_HEAD(&ai->corr);
	INIT_LIST_HEAD(&ai->free);
	INIT_LIST_HEAD(&ai->erase);
	INIT_LIST_HEAD(&ai->alien);
	ai->volumes = RB_ROOT;
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
					       sizeof(struct ubi_ainf_peb),
					       0, 0, NULL);
	if (!ai->aeb_slab_cache) {
		ret = -ENOMEM;
		goto fail;
	}

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl1->size);
	wl_pool_size = be16_to_cpu(fmpl2->size);
	fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err("bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err("bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err("bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 0);
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			be32_to_cpu(fmec->ec), 1);
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err("bad fastmap vol header magic: 0x%x, "
				"expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (!av)
			goto fail_bad;

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err("bad fastmap EBA header magic: 0x%x, "
				"expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if (pnum < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			/* This can happen if a PEB is already in an EBA known
			 * by this fastmap but the PEB itself is not in the used
			 * list.
			 * In this case the PEB can be within the fastmap pool
			 * or while writing the fastmap it was in the protection
			 * queue.
			 */
			if (!aeb) {
				aeb = kmem_cache_alloc(ai->aeb_slab_cache,
						       GFP_KERNEL);
				if (!aeb) {
					ret = -ENOMEM;

					goto fail;
				}

				aeb->lnum = j;
				aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
				aeb->ec = -1;
				aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
				list_add_tail(&aeb->u.list, &eba_orphans);
				continue;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}

		ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
		if (!ech) {
			ret = -ENOMEM;
			goto fail;
		}

		list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
					 u.list) {
			int err;

			if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
				ubi_err("bad PEB in fastmap EBA orphan list");
				ret = UBI_BAD_FASTMAP;
				kfree(ech);
				goto fail;
			}

			err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
			if (err && err != UBI_IO_BITFLIPS) {
				ubi_err("unable to read EC header! PEB:%i "
					"err:%i", tmp_aeb->pnum, err);
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				kfree(ech);

				goto fail;
			} else if (err == UBI_IO_BITFLIPS)
				tmp_aeb->scrub = 1;

			tmp_aeb->ec = be64_to_cpu(ech->ec);
			assign_aeb_to_av(ai, tmp_aeb, av);
		}

		kfree(ech);
	}

	ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
			&eba_orphans, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
			&eba_orphans, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	ubi_assert(list_empty(&used));
	ubi_assert(list_empty(&eba_orphans));
	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}

	return ret;
}
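
/*
 * On-flash layout implied by the parsing order above (and produced in the
 * same order by ubi_write_fastmap() below):
 *
 *	ubi_fm_sb
 *	ubi_fm_hdr
 *	ubi_fm_scan_pool	(pool)
 *	ubi_fm_scan_pool	(WL pool)
 *	ubi_fm_ec entries	(free, used, scrub and erase lists)
 *	per volume: ubi_fm_volhdr followed by a ubi_fm_eba table with
 *	reserved_pebs __be32 entries
 */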

/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @fm_anchor: The fastmap starts at this PEB
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     int fm_anchor)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	int i, used_blocks, pnum, ret = 0;
	size_t fm_size;
	u32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	mutex_lock(&ubi->fm_mutex);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err("bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err("bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
			ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err("wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err("bad fastmap anchor vol_id: 0x%x,"
					" expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err("bad fastmap data vol_id: 0x%x,"
					" expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err("unable to read fastmap block# %i (PEB: %i, "
				"err: %i)", i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err("fastmap data CRC is invalid");
		ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kfree(fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg("attached by fastmap");
	ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;

	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
out:
	mutex_unlock(&ubi->fm_mutex);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err("Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}
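
/*
 * Note on the CRC check above: data_crc is read out of the in-memory
 * copy, zeroed in the buffer, and crc32() is computed over all fm_size
 * bytes -- the exact mirror of how ubi_write_fastmap() below fills in
 * data_crc before writing.
 */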

/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the fastmap to be written
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl1, *fmpl2;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct rb_node *node;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avhdr) {
		ret = -ENOMEM;
		goto out;
	}

	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvhdr) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl1);
	fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++)
		fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);

	fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl2);
	fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++)
		fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);

	for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
		wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++)
			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
	if (ret) {
		ubi_err("unable to write vid_hdr to fastmap SB!");
		goto out_kfree;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
		if (ret) {
			ubi_err("unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
			new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
		if (ret) {
			ubi_err("unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	dbg_bld("fastmap written!");

out_kfree:
	ubi_free_vid_hdr(ubi, avhdr);
	ubi_free_vid_hdr(ubi, dvhdr);
out:
	return ret;
}
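
/*
 * Note on ordering: the fastmap is fully serialized into ubi->fm_buf
 * while wl_lock and volumes_lock are held, the locks are dropped, and
 * only then do the VID headers and the payload hit the flash. data_crc
 * is computed over the complete buffer just before the data writes, so
 * ubi_scan_fastmap() can verify it on the next attach.
 */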

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}
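
/*
 * Note: on success ubi_io_sync_erase() returns the number of physical
 * erasures performed (normally 1), so the EC header written back above
 * carries the old counter plus that number, and the same value is
 * returned as the new erase counter of the PEB.
 */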

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 * @fm: the fastmap to be destroyed
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi,
			      struct ubi_fastmap_layout *fm)
{
	int ret;
	struct ubi_vid_hdr *vh;

	ret = erase_block(ubi, fm->e[0]->pnum);
	if (ret < 0)
		return ret;

	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vh)
		return -ENOMEM;

	/* deleting the current fastmap SB is not enough, an old SB may exist,
	 * so create a (corrupted) SB such that fastmap will find it and fall
	 * back to scanning mode in any case */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);

	return ret;
}

/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	mutex_lock(&ubi->fm_mutex);

	ubi_refill_pools(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		mutex_unlock(&ubi->fm_mutex);
		return 0;
	}

	ret = ubi_ensure_anchor_pebs(ubi);
	if (ret) {
		mutex_unlock(&ubi->fm_mutex);
		return ret;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		mutex_unlock(&ubi->fm_mutex);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;

	for (i = 0; i < new_fm->used_blocks; i++) {
		new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!new_fm->e[i]) {
			while (i--)
				kfree(new_fm->e[i]);

			kfree(new_fm);
			mutex_unlock(&ubi->fm_mutex);
			return -ENOMEM;
		}
	}

	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err("fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e && !old_fm) {
			int j;
			ubi_err("could not get any free erase block");

			for (j = 1; j < i; j++)
				ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);

			ret = -ENOSPC;
			goto err;
		} else if (!tmp_e && old_fm) {
			ret = erase_block(ubi, old_fm->e[i]->pnum);
			if (ret < 0) {
				int j;

				for (j = 1; j < i; j++)
					ubi_wl_put_fm_peb(ubi, new_fm->e[j],
							  j, 0);

				ubi_err("could not erase old fastmap PEB");
				goto err;
			}

			new_fm->e[i]->pnum = old_fm->e[i]->pnum;
			new_fm->e[i]->ec = old_fm->e[i]->ec;
		} else {
			new_fm->e[i]->pnum = tmp_e->pnum;
			new_fm->e[i]->ec = tmp_e->ec;

			if (old_fm)
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
				int i;
				ubi_err("could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++)
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
				goto err;
			}

			new_fm->e[0]->pnum = old_fm->e[0]->pnum;
			new_fm->e[0]->ec = ret;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);

			new_fm->e[0]->pnum = tmp_e->pnum;
			new_fm->e[0]->ec = tmp_e->ec;
		}
	} else {
		if (!tmp_e) {
			int i;
			ubi_err("could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++)
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);

			ret = -ENOSPC;
			goto err;
		}

		new_fm->e[0]->pnum = tmp_e->pnum;
		new_fm->e[0]->ec = tmp_e->ec;
	}

	down_write(&ubi->work_sem);
	down_write(&ubi->fm_sem);
	ret = ubi_write_fastmap(ubi, new_fm);
	up_write(&ubi->fm_sem);
	up_write(&ubi->work_sem);

	if (ret)
		goto err;

out_unlock:
	mutex_unlock(&ubi->fm_mutex);
	kfree(old_fm);
	return ret;

err:
	kfree(new_fm);

	ubi_warn("Unable to write new fastmap, err=%i", ret);

	ret = 0;
	if (old_fm) {
		ret = invalidate_fastmap(ubi, old_fm);
		if (ret < 0)
			ubi_err("Unable to invalidate current fastmap!");
		else if (ret)
			ret = 0;
	}
	goto out_unlock;
}
1570