/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/* This file implements TNC functions for committing */

#include <linux/random.h>
#include "ubifs.h"

/**
 * make_idx_node - make an index node for the in-the-gaps method of TNC commit.
 * @c: UBIFS file-system description object
 * @idx: buffer in which to place new index node
 * @znode: znode from which to make new index node
 * @lnum: LEB number where new index node will be written
 * @offs: offset where new index node will be written
 * @len: length of new index node
 */
static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
			 struct ubifs_znode *znode, int lnum, int offs, int len)
{
	struct ubifs_znode *zp;
	u8 hash[UBIFS_HASH_ARR_SZ];
	int i, err;

	/* Make index node */
	idx->ch.node_type = UBIFS_IDX_NODE;
	idx->child_cnt = cpu_to_le16(znode->child_cnt);
	idx->level = cpu_to_le16(znode->level);
	for (i = 0; i < znode->child_cnt; i++) {
		struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
		struct ubifs_zbranch *zbr = &znode->zbranch[i];

		key_write_idx(c, &zbr->key, &br->key);
		br->lnum = cpu_to_le32(zbr->lnum);
		br->offs = cpu_to_le32(zbr->offs);
		br->len = cpu_to_le32(zbr->len);
		ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
		if (!zbr->lnum || !zbr->len) {
			ubifs_err(c, "bad ref in znode");
			ubifs_dump_znode(c, znode);
			if (zbr->znode)
				ubifs_dump_znode(c, zbr->znode);

			return -EINVAL;
		}
	}
	ubifs_prepare_node(c, idx, len, 0);
	ubifs_node_calc_hash(c, idx, hash);

	znode->lnum = lnum;
	znode->offs = offs;
	znode->len = len;

	err = insert_old_idx_znode(c, znode);

	/* Update the parent */
	zp = znode->parent;
	if (zp) {
		struct ubifs_zbranch *zbr;

		zbr = &zp->zbranch[znode->iip];
		zbr->lnum = lnum;
		zbr->offs = offs;
		zbr->len = len;
		ubifs_copy_hash(c, hash, zbr->hash);
	} else {
		c->zroot.lnum = lnum;
		c->zroot.offs = offs;
		c->zroot.len = len;
		ubifs_copy_hash(c, hash, c->zroot.hash);
	}
	c->calc_idx_sz += ALIGN(len, 8);

	atomic_long_dec(&c->dirty_zn_cnt);

	ubifs_assert(c, ubifs_zn_dirty(znode));
	ubifs_assert(c, ubifs_zn_cow(znode));

	/*
	 * Note, unlike 'write_index()' we do not add memory barriers here
	 * because this function is called with @c->tnc_mutex locked.
	 */
	__clear_bit(DIRTY_ZNODE, &znode->flags);
	__clear_bit(COW_ZNODE, &znode->flags);

	return err;
}
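
/*
 * Note (added for clarity): make_idx_node() serializes a znode for the
 * in-the-gaps commit path; write_index() further below repeats the same
 * serialization for the znodes that were laid out in empty space.
 */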

/**
 * fill_gap - make index nodes in gaps in dirty index LEBs.
 * @c: UBIFS file-system description object
 * @lnum: LEB number that gap appears in
 * @gap_start: offset of start of gap
 * @gap_end: offset of end of gap
 * @dirt: adds dirty space to this
 *
 * This function returns the number of index nodes written into the gap, or a
 * negative error code in case of failure.
 */
static int fill_gap(struct ubifs_info *c, int lnum, int gap_start, int gap_end,
		    int *dirt)
{
	int len, gap_remains, gap_pos, written, pad_len;

	ubifs_assert(c, (gap_start & 7) == 0);
	ubifs_assert(c, (gap_end & 7) == 0);
	ubifs_assert(c, gap_end >= gap_start);

	gap_remains = gap_end - gap_start;
	if (!gap_remains)
		return 0;
	gap_pos = gap_start;
	written = 0;
	while (c->enext) {
		len = ubifs_idx_node_sz(c, c->enext->child_cnt);
		if (len < gap_remains) {
			struct ubifs_znode *znode = c->enext;
			const int alen = ALIGN(len, 8);
			int err;

			ubifs_assert(c, alen <= gap_remains);
			err = make_idx_node(c, c->ileb_buf + gap_pos, znode,
					    lnum, gap_pos, len);
			if (err)
				return err;
			gap_remains -= alen;
			gap_pos += alen;
			c->enext = znode->cnext;
			if (c->enext == c->cnext)
				c->enext = NULL;
			written += 1;
		} else
			break;
	}
	if (gap_end == c->leb_size) {
		c->ileb_len = ALIGN(gap_pos, c->min_io_size);
		/* Pad to end of min_io_size */
		pad_len = c->ileb_len - gap_pos;
	} else
		/* Pad to end of gap */
		pad_len = gap_remains;
	dbg_gc("LEB %d:%d to %d len %d nodes written %d wasted bytes %d",
	       lnum, gap_start, gap_end, gap_end - gap_start, written, pad_len);
	ubifs_pad(c, c->ileb_buf + gap_pos, pad_len);
	*dirt += pad_len;
	return written;
}
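
/*
 * Illustrative example (assumed numbers, not taken from the code above): for a
 * gap running from offset 1024 to offset 2048, a 344-byte index node for the
 * next znode on the commit list is placed at offset 1024 of the LEB buffer,
 * the gap position then advances by ALIGN(344, 8) = 344 bytes, and this
 * repeats until the next index node no longer fits, at which point the
 * remainder of the gap is padded and accounted as dirty space.
 */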

/**
 * find_old_idx - find an index node obsoleted since the last commit start.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of obsoleted index node
 * @offs: offset of obsoleted index node
 *
 * Returns %1 if found and %0 otherwise.
 */
static int find_old_idx(struct ubifs_info *c, int lnum, int offs)
{
	struct ubifs_old_idx *o;
	struct rb_node *p;

	p = c->old_idx.rb_node;
	while (p) {
		o = rb_entry(p, struct ubifs_old_idx, rb);
		if (lnum < o->lnum)
			p = p->rb_left;
		else if (lnum > o->lnum)
			p = p->rb_right;
		else if (offs < o->offs)
			p = p->rb_left;
		else if (offs > o->offs)
			p = p->rb_right;
		else
			return 1;
	}
	return 0;
}

/**
 * is_idx_node_in_use - determine if an index node can be overwritten.
 * @c: UBIFS file-system description object
 * @key: key of index node
 * @level: index node level
 * @lnum: LEB number of index node
 * @offs: offset of index node
 *
 * If @key / @lnum / @offs identify an index node that was not part of the old
 * index, then this function returns %0 (obsolete).  Else if the index node was
 * part of the old index but is now dirty %1 is returned, else if it is clean %2
 * is returned. A negative error code is returned on failure.
 */
static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
			      int level, int lnum, int offs)
{
	int ret;

	ret = is_idx_node_in_tnc(c, key, level, lnum, offs);
	if (ret < 0)
		return ret; /* Error code */
	if (ret == 0)
		if (find_old_idx(c, lnum, offs))
			return 1;
	return ret;
}

/**
 * layout_leb_in_gaps - layout index nodes using in-the-gaps method.
 * @c: UBIFS file-system description object
 * @p: return LEB number here
 *
 * This function lays out new index nodes for dirty znodes using the
 * in-the-gaps method of TNC commit.
 * It merely puts the next znode into the next gap, making no attempt to
 * maximise the number of znodes that fit.
 * This function returns the number of index nodes written into the gaps, or a
 * negative error code on failure.
 */
static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	int lnum, dirt = 0, gap_start, gap_end, err, written, tot_written;

	tot_written = 0;
	/* Get an index LEB with lots of obsolete index nodes */
	lnum = ubifs_find_dirty_idx_leb(c);
	if (lnum < 0)
		/*
		 * There may also be dirt in the index head that could be
		 * filled; however, we do not check there at present.
		 */
		return lnum; /* Error code */
	*p = lnum;
	dbg_gc("LEB %d", lnum);
	/*
	 * Scan the index LEB.  We use the generic scan for this even though
	 * it is more comprehensive and less efficient than is needed for this
	 * purpose.
	 */
	sleb = ubifs_scan(c, lnum, 0, c->ileb_buf, 0);
	c->ileb_len = 0;
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);
	gap_start = 0;
	list_for_each_entry(snod, &sleb->nodes, list) {
		struct ubifs_idx_node *idx;
		int in_use, level;

		ubifs_assert(c, snod->type == UBIFS_IDX_NODE);
		idx = snod->node;
		key_read(c, ubifs_idx_key(c, idx), &snod->key);
		level = le16_to_cpu(idx->level);
		/* Determine if the index node is in use (not obsolete) */
		in_use = is_idx_node_in_use(c, &snod->key, level, lnum,
					    snod->offs);
		if (in_use < 0) {
			ubifs_scan_destroy(sleb);
			return in_use; /* Error code */
		}
		if (in_use) {
			if (in_use == 1)
				dirt += ALIGN(snod->len, 8);
			/*
			 * The obsolete index nodes form gaps that can be
			 * overwritten.  This gap has ended because we have
			 * found an index node that is still in use,
			 * i.e. not obsolete.
			 */
			gap_end = snod->offs;
			/* Try to fill gap */
			written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
			if (written < 0) {
				ubifs_scan_destroy(sleb);
				return written; /* Error code */
			}
			tot_written += written;
			gap_start = ALIGN(snod->offs + snod->len, 8);
		}
	}
	ubifs_scan_destroy(sleb);
	c->ileb_len = c->leb_size;
	gap_end = c->leb_size;
	/* Try to fill gap */
	written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
	if (written < 0)
		return written; /* Error code */
	tot_written += written;
	if (tot_written == 0) {
		struct ubifs_lprops lp;

		dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
		err = ubifs_read_one_lp(c, lnum, &lp);
		if (err)
			return err;
		if (lp.free == c->leb_size) {
			/*
			 * We must have snatched this LEB from the idx_gc list
			 * so we need to correct the free and dirty space.
			 */
			err = ubifs_change_one_lp(c, lnum,
						  c->leb_size - c->ileb_len,
						  dirt, 0, 0, 0);
			if (err)
				return err;
		}
		return 0;
	}
	err = ubifs_change_one_lp(c, lnum, c->leb_size - c->ileb_len, dirt,
				  0, 0, 0);
	if (err)
		return err;
	err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len);
	if (err)
		return err;
	dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
	return tot_written;
}

/**
 * get_leb_cnt - calculate the number of empty LEBs needed to commit.
 * @c: UBIFS file-system description object
 * @cnt: number of znodes to commit
 *
 * This function returns the number of empty LEBs needed to commit @cnt znodes
 * to the current index head.  The number is not exact and may be more than
 * needed.
 */
static int get_leb_cnt(struct ubifs_info *c, int cnt)
{
	int d;

	/* Assume maximum index node size (i.e. overestimate space needed) */
	cnt -= (c->leb_size - c->ihead_offs) / c->max_idx_node_sz;
	if (cnt < 0)
		cnt = 0;
	d = c->leb_size / c->max_idx_node_sz;
	return DIV_ROUND_UP(cnt, d);
}
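
/*
 * Worked example with assumed (not real) geometry: if leb_size is 130048,
 * ihead_offs is 65536 and max_idx_node_sz is 512, the current index head still
 * has room for (130048 - 65536) / 512 = 126 index nodes, every further LEB
 * holds 130048 / 512 = 254 of them, so committing 1000 znodes needs
 * DIV_ROUND_UP(1000 - 126, 254) = 4 empty LEBs.
 */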

/**
 * layout_in_gaps - in-the-gaps method of committing TNC.
 * @c: UBIFS file-system description object
 * @cnt: number of dirty znodes to commit.
 *
 * This function lays out new index nodes for dirty znodes using the
 * in-the-gaps method of TNC commit.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_in_gaps(struct ubifs_info *c, int cnt)
{
	int err, leb_needed_cnt, written, *p;

	dbg_gc("%d znodes to write", cnt);

	c->gap_lebs = kmalloc_array(c->lst.idx_lebs + 1, sizeof(int),
				    GFP_NOFS);
	if (!c->gap_lebs)
		return -ENOMEM;

	p = c->gap_lebs;
	do {
		ubifs_assert(c, p < c->gap_lebs + c->lst.idx_lebs);
		written = layout_leb_in_gaps(c, p);
		if (written < 0) {
			err = written;
			if (err != -ENOSPC) {
				kfree(c->gap_lebs);
				c->gap_lebs = NULL;
				return err;
			}
			if (!dbg_is_chk_index(c)) {
				/*
				 * Do not print scary warnings if the debugging
				 * option which forces in-the-gaps is enabled.
				 */
				ubifs_warn(c, "out of space");
				ubifs_dump_budg(c, &c->bi);
				ubifs_dump_lprops(c);
			}
			/* Try to commit anyway */
			break;
		}
		p++;
		cnt -= written;
		leb_needed_cnt = get_leb_cnt(c, cnt);
		dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
		       leb_needed_cnt, c->ileb_cnt);
	} while (leb_needed_cnt > c->ileb_cnt);

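	/* Terminate the list of LEBs; return_gap_lebs() walks it until -1 */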
	*p = -1;
	return 0;
}

/**
 * layout_in_empty_space - layout index nodes in empty space.
 * @c: UBIFS file-system description object
 *
 * This function lays out new index nodes for dirty znodes using empty LEBs.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_in_empty_space(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext, *zp;
	int lnum, offs, len, next_len, buf_len, buf_offs, used, avail;
	int wlen, blen, err;

	cnext = c->enext;
	if (!cnext)
		return 0;

	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	buf_len = ubifs_idx_node_sz(c, c->fanout);
	buf_len = ALIGN(buf_len, c->min_io_size);
	used = 0;
	avail = buf_len;

	/* Ensure there is enough room for first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size)
		lnum = -1;

	while (1) {
		znode = cnext;

		len = ubifs_idx_node_sz(c, znode->child_cnt);

		/* Determine the index node position */
		if (lnum == -1) {
			if (c->ileb_nxt >= c->ileb_cnt) {
				ubifs_err(c, "out of space");
				return -ENOSPC;
			}
			lnum = c->ilebs[c->ileb_nxt++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}

		offs = buf_offs + used;

		znode->lnum = lnum;
		znode->offs = offs;
		znode->len = len;

		/* Update the parent */
		zp = znode->parent;
		if (zp) {
			struct ubifs_zbranch *zbr;
			int i;

			i = znode->iip;
			zbr = &zp->zbranch[i];
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
		} else {
			c->zroot.lnum = lnum;
			c->zroot.offs = offs;
			c->zroot.len = len;
		}
		c->calc_idx_sz += ALIGN(len, 8);

		/*
		 * Once lprops is updated, we can decrease the dirty znode count
		 * but it is easier to just do it here.
		 */
		atomic_long_dec(&c->dirty_zn_cnt);

		/*
		 * Calculate the next index node length to see if there is
		 * enough room for it
		 */
		cnext = znode->cnext;
		if (cnext == c->cnext)
			next_len = 0;
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);
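
		/*
		 * Added note: @wlen is the unpadded length of a write that
		 * would end at this znode, while @used and @avail track the
		 * 8-byte aligned space consumed and still free in the
		 * @buf_len sized write buffer that write_index() will use.
		 */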

		if (next_len != 0 &&
		    buf_offs + used + next_len <= c->leb_size &&
		    avail > 0)
			continue;

		if (avail <= 0 && next_len &&
		    buf_offs + used + next_len <= c->leb_size)
			blen = buf_len;
		else
			blen = ALIGN(wlen, c->min_io_size);

		/* The buffer is full or there are no more znodes to do */
		buf_offs += blen;
		if (next_len) {
			if (buf_offs + next_len > c->leb_size) {
				err = ubifs_update_one_lp(c, lnum,
					c->leb_size - buf_offs, blen - used,
					0, 0);
				if (err)
					return err;
				lnum = -1;
			}
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
			continue;
		}
		err = ubifs_update_one_lp(c, lnum, c->leb_size - buf_offs,
					  blen - used, 0, 0);
		if (err)
			return err;
		break;
	}

	c->dbg->new_ihead_lnum = lnum;
	c->dbg->new_ihead_offs = buf_offs;

	return 0;
}
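
/*
 * Note (added for clarity): write_index() later repeats the same buffer
 * arithmetic and checks that every znode is written at exactly the position
 * recorded here, complaining about an "inconsistent znode posn" otherwise.
 */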

/**
 * layout_commit - determine positions of index nodes to commit.
 * @c: UBIFS file-system description object
 * @no_space: indicates that insufficient empty LEBs were allocated
 * @cnt: number of znodes to commit
 *
 * Calculate and update the positions of index nodes to commit.  If an
 * insufficient number of empty LEBs was allocated, then index nodes are placed
 * into the gaps created by obsolete index nodes in non-empty index LEBs.  For
 * this purpose, an obsolete index node is one that was not in the index as of
 * the end of the last commit.  Writing "in-the-gaps" requires that those index
 * LEBs are updated atomically in-place.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_commit(struct ubifs_info *c, int no_space, int cnt)
{
	int err;

	if (no_space) {
		err = layout_in_gaps(c, cnt);
		if (err)
			return err;
	}
	err = layout_in_empty_space(c);
	return err;
}

/**
 * find_first_dirty - find first dirty znode.
 * @znode: znode to begin searching from
 *
 * This function returns the dirty znode to commit first within the subtree
 * rooted at @znode (dirty children are committed before their parents), or
 * %NULL if the subtree contains no dirty znodes.
 */
static struct ubifs_znode *find_first_dirty(struct ubifs_znode *znode)
{
	int i, cont;

	if (!znode)
		return NULL;

	while (1) {
		if (znode->level == 0) {
			if (ubifs_zn_dirty(znode))
				return znode;
			return NULL;
		}
		cont = 0;
		for (i = 0; i < znode->child_cnt; i++) {
			struct ubifs_zbranch *zbr = &znode->zbranch[i];

			if (zbr->znode && ubifs_zn_dirty(zbr->znode)) {
				znode = zbr->znode;
				cont = 1;
				break;
			}
		}
		if (!cont) {
			if (ubifs_zn_dirty(znode))
				return znode;
			return NULL;
		}
	}
}

/**
 * find_next_dirty - find next dirty znode.
 * @znode: znode to begin searching from
 *
 * This function returns the next dirty znode to commit after @znode: the
 * first dirty znode of the next dirty sibling subtree if there is one,
 * otherwise @znode's parent, which is committed after its children. %NULL is
 * returned if @znode has no parent.
 */
static struct ubifs_znode *find_next_dirty(struct ubifs_znode *znode)
{
	int n = znode->iip + 1;

	znode = znode->parent;
	if (!znode)
		return NULL;
	for (; n < znode->child_cnt; n++) {
		struct ubifs_zbranch *zbr = &znode->zbranch[n];

		if (zbr->znode && ubifs_zn_dirty(zbr->znode))
			return find_first_dirty(zbr->znode);
	}
	return znode;
}

/**
 * get_znodes_to_commit - create list of dirty znodes to commit.
 * @c: UBIFS file-system description object
 *
 * This function returns the number of znodes to commit.
 */
static int get_znodes_to_commit(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext;
	int cnt = 0;

	c->cnext = find_first_dirty(c->zroot.znode);
	znode = c->enext = c->cnext;
	if (!znode) {
		dbg_cmt("no znodes to commit");
		return 0;
	}
	cnt += 1;
	while (1) {
		ubifs_assert(c, !ubifs_zn_cow(znode));
		__set_bit(COW_ZNODE, &znode->flags);
		znode->alt = 0;
		cnext = find_next_dirty(znode);
		if (!cnext) {
			znode->cnext = c->cnext;
			break;
		}
		znode->cparent = znode->parent;
		znode->ciip = znode->iip;
		znode->cnext = cnext;
		znode = cnext;
		cnt += 1;
	}
	dbg_cmt("committing %d znodes", cnt);
	ubifs_assert(c, cnt == atomic_long_read(&c->dirty_zn_cnt));
	return cnt;
}
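
/*
 * Note (added for clarity): the list built above is in "child before parent"
 * order, because find_first_dirty() descends to the deepest dirty znode and
 * find_next_dirty() only moves up to a parent once all of its dirty children
 * have been visited, so an index node is always written after the nodes it
 * references.
 */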

/**
 * alloc_idx_lebs - allocate empty LEBs to be used to commit.
 * @c: UBIFS file-system description object
 * @cnt: number of znodes to commit
 *
 * This function returns %-ENOSPC if it cannot allocate a sufficient number of
 * empty LEBs.  %0 is returned on success, otherwise a negative error code
 * is returned.
 */
static int alloc_idx_lebs(struct ubifs_info *c, int cnt)
{
	int i, leb_cnt, lnum;

	c->ileb_cnt = 0;
	c->ileb_nxt = 0;
	leb_cnt = get_leb_cnt(c, cnt);
	dbg_cmt("need about %d empty LEBs for TNC commit", leb_cnt);
	if (!leb_cnt)
		return 0;
	c->ilebs = kmalloc_array(leb_cnt, sizeof(int), GFP_NOFS);
	if (!c->ilebs)
		return -ENOMEM;
	for (i = 0; i < leb_cnt; i++) {
		lnum = ubifs_find_free_leb_for_idx(c);
		if (lnum < 0)
			return lnum;
		c->ilebs[c->ileb_cnt++] = lnum;
		dbg_cmt("LEB %d", lnum);
	}
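	/*
	 * When index checking is enabled, occasionally pretend the allocation
	 * failed so that the in-the-gaps commit path gets exercised as well.
	 */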
	if (dbg_is_chk_index(c) && !(prandom_u32() & 7))
		return -ENOSPC;
	return 0;
}

/**
 * free_unused_idx_lebs - free unused LEBs that were allocated for the commit.
 * @c: UBIFS file-system description object
 *
 * It is possible that we allocate more empty LEBs for the commit than we need.
 * This function frees the surplus.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int free_unused_idx_lebs(struct ubifs_info *c)
{
	int i, err = 0, lnum, er;

	for (i = c->ileb_nxt; i < c->ileb_cnt; i++) {
		lnum = c->ilebs[i];
		dbg_cmt("LEB %d", lnum);
		er = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
					 LPROPS_INDEX | LPROPS_TAKEN, 0);
		if (!err)
			err = er;
	}
	return err;
}

/**
 * free_idx_lebs - free unused LEBs after commit end.
 * @c: UBIFS file-system description object
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int free_idx_lebs(struct ubifs_info *c)
{
	int err;

	err = free_unused_idx_lebs(c);
	kfree(c->ilebs);
	c->ilebs = NULL;
	return err;
}

/**
 * ubifs_tnc_start_commit - start TNC commit.
 * @c: UBIFS file-system description object
 * @zroot: new index root position is returned here
 *
 * This function prepares the list of indexing nodes to commit and lays out
 * their positions on flash. If there is not enough free space, it uses the
 * in-the-gaps commit method. Returns zero in case of success and a negative
 * error code in case of failure.
 */
int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	int err = 0, cnt;

	mutex_lock(&c->tnc_mutex);
	err = dbg_check_tnc(c, 1);
	if (err)
		goto out;
	cnt = get_znodes_to_commit(c);
	if (cnt != 0) {
		int no_space = 0;

		err = alloc_idx_lebs(c, cnt);
		if (err == -ENOSPC)
			no_space = 1;
		else if (err)
			goto out_free;
		err = layout_commit(c, no_space, cnt);
		if (err)
			goto out_free;
		ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0);
		err = free_unused_idx_lebs(c);
		if (err)
			goto out;
	}
	destroy_old_idx(c);
	memcpy(zroot, &c->zroot, sizeof(struct ubifs_zbranch));

	err = ubifs_save_dirty_idx_lnums(c);
	if (err)
		goto out;

	spin_lock(&c->space_lock);
	/*
	 * Although we have not finished committing yet, update the size of the
	 * committed index ('c->bi.old_idx_sz') and zero out the index growth
	 * budget. It is OK to do this now, because we've reserved all the
	 * space which is needed to commit the index, and it is safe for the
	 * budgeting subsystem to assume the index is already committed,
	 * even though it is not.
	 */
	ubifs_assert(c, c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
	c->bi.old_idx_sz = c->calc_idx_sz;
	c->bi.uncommitted_idx = 0;
	c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
	spin_unlock(&c->space_lock);
	mutex_unlock(&c->tnc_mutex);

	dbg_cmt("number of index LEBs %d", c->lst.idx_lebs);
	dbg_cmt("size of index %llu", c->calc_idx_sz);
	return err;

out_free:
	free_idx_lebs(c);
out:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
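
/*
 * Sketch of the TNC commit sequence as seen from this file (descriptive
 * summary only): ubifs_tnc_start_commit() collects the dirty znodes, lays out
 * their future positions and updates the budgeting state; afterwards
 * ubifs_tnc_end_commit() writes the remaining index nodes via write_index()
 * and frees the obsolete znodes.
 */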

/**
 * write_index - write index nodes.
 * @c: UBIFS file-system description object
 *
 * This function writes the index nodes whose positions were laid out in the
 * layout_in_empty_space function.
 */
static int write_index(struct ubifs_info *c)
{
	struct ubifs_idx_node *idx;
	struct ubifs_znode *znode, *cnext;
	int i, lnum, offs, len, next_len, buf_len, buf_offs, used;
	int avail, wlen, err, lnum_pos = 0, blen, nxt_offs;

	cnext = c->enext;
	if (!cnext)
		return 0;

	/*
	 * Always write index nodes to the index head so that index nodes and
	 * other types of nodes are never mixed in the same erase block.
	 */
	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	/* Allocate commit buffer */
	buf_len = ALIGN(c->max_idx_node_sz, c->min_io_size);
	used = 0;
	avail = buf_len;

	/* Ensure there is enough room for first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size) {
		err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, 0,
					  LPROPS_TAKEN);
		if (err)
			return err;
		lnum = -1;
	}

	while (1) {
		u8 hash[UBIFS_HASH_ARR_SZ];

		cond_resched();

		znode = cnext;
		idx = c->cbuf + used;

		/* Make index node */
		idx->ch.node_type = UBIFS_IDX_NODE;
		idx->child_cnt = cpu_to_le16(znode->child_cnt);
		idx->level = cpu_to_le16(znode->level);
		for (i = 0; i < znode->child_cnt; i++) {
			struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
			struct ubifs_zbranch *zbr = &znode->zbranch[i];

			key_write_idx(c, &zbr->key, &br->key);
			br->lnum = cpu_to_le32(zbr->lnum);
			br->offs = cpu_to_le32(zbr->offs);
			br->len = cpu_to_le32(zbr->len);
			ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
			if (!zbr->lnum || !zbr->len) {
				ubifs_err(c, "bad ref in znode");
				ubifs_dump_znode(c, znode);
				if (zbr->znode)
					ubifs_dump_znode(c, zbr->znode);

				return -EINVAL;
			}
		}
		len = ubifs_idx_node_sz(c, znode->child_cnt);
		ubifs_prepare_node(c, idx, len, 0);
		ubifs_node_calc_hash(c, idx, hash);

		mutex_lock(&c->tnc_mutex);

		if (znode->cparent)
			ubifs_copy_hash(c, hash,
					znode->cparent->zbranch[znode->ciip].hash);

		if (znode->parent) {
			if (!ubifs_zn_obsolete(znode))
				ubifs_copy_hash(c, hash,
					znode->parent->zbranch[znode->iip].hash);
		} else {
			ubifs_copy_hash(c, hash, c->zroot.hash);
		}

		mutex_unlock(&c->tnc_mutex);

		/* Determine the index node position */
		if (lnum == -1) {
			lnum = c->ilebs[lnum_pos++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}
		offs = buf_offs + used;

		if (lnum != znode->lnum || offs != znode->offs ||
		    len != znode->len) {
			ubifs_err(c, "inconsistent znode posn");
			return -EINVAL;
		}

		/* Grab some stuff from znode while we still can */
		cnext = znode->cnext;

		ubifs_assert(c, ubifs_zn_dirty(znode));
		ubifs_assert(c, ubifs_zn_cow(znode));

		/*
		 * It is important that other threads should see %DIRTY_ZNODE
		 * flag cleared before %COW_ZNODE. Specifically, it matters in
		 * the 'dirty_cow_znode()' function. This is the reason for the
		 * first barrier. Also, we want the bit changes to be seen to
		 * other threads ASAP, to avoid unnecessary copying, which is
		 * the reason for the second barrier.
		 */
		clear_bit(DIRTY_ZNODE, &znode->flags);
		smp_mb__before_atomic();
		clear_bit(COW_ZNODE, &znode->flags);
		smp_mb__after_atomic();

		/*
		 * We have marked the znode as clean but have not updated the
		 * @c->clean_zn_cnt counter. If this znode becomes dirty again
		 * before 'free_obsolete_znodes()' is called, then
		 * @c->clean_zn_cnt will be decremented before it gets
		 * incremented (resulting in 2 decrements for the same znode).
		 * This means that @c->clean_zn_cnt may become negative for a
		 * while.
		 *
		 * Q: why can we not increment @c->clean_zn_cnt?
		 * A: because we do not have the @c->tnc_mutex locked, and the
		 *    following code would be racy and buggy:
		 *
		 *    if (!ubifs_zn_obsolete(znode)) {
		 *            atomic_long_inc(&c->clean_zn_cnt);
		 *            atomic_long_inc(&ubifs_clean_zn_cnt);
		 *    }
		 *
		 *    Thus, we just delay the @c->clean_zn_cnt update until we
		 *    have the mutex locked.
		 */

		/* Do not access znode from this point on */

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);

		/*
		 * Calculate the next index node length to see if there is
		 * enough room for it
		 */
		if (cnext == c->cnext)
			next_len = 0;
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		nxt_offs = buf_offs + used + next_len;
		if (next_len && nxt_offs <= c->leb_size) {
			if (avail > 0)
				continue;
			else
				blen = buf_len;
		} else {
			wlen = ALIGN(wlen, 8);
			blen = ALIGN(wlen, c->min_io_size);
			ubifs_pad(c, c->cbuf + wlen, blen - wlen);
		}

		/* The buffer is full or there are no more znodes to do */
		err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen);
		if (err)
			return err;
		buf_offs += blen;
		if (next_len) {
			if (nxt_offs > c->leb_size) {
				err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0,
							  0, LPROPS_TAKEN);
				if (err)
					return err;
				lnum = -1;
			}
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
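			/*
			 * Carry the not yet written tail of the buffer (the
			 * bytes beyond the @blen just flushed) back to the
			 * start of @c->cbuf for the next write.
			 */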
			memmove(c->cbuf, c->cbuf + blen, used);
			continue;
		}
		break;
	}

	if (lnum != c->dbg->new_ihead_lnum ||
	    buf_offs != c->dbg->new_ihead_offs) {
		ubifs_err(c, "inconsistent ihead");
		return -EINVAL;
	}

	c->ihead_lnum = lnum;
	c->ihead_offs = buf_offs;

	return 0;
}

/**
 * free_obsolete_znodes - free obsolete znodes.
 * @c: UBIFS file-system description object
 *
 * During commit end, obsolete znodes are freed.
 */
static void free_obsolete_znodes(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext;

	cnext = c->cnext;
	do {
		znode = cnext;
		cnext = znode->cnext;
		if (ubifs_zn_obsolete(znode))
			kfree(znode);
		else {
			znode->cnext = NULL;
			atomic_long_inc(&c->clean_zn_cnt);
			atomic_long_inc(&ubifs_clean_zn_cnt);
		}
	} while (cnext != c->cnext);
}

/**
 * return_gap_lebs - return LEBs used by the in-the-gaps commit method.
 * @c: UBIFS file-system description object
 *
 * This function clears the "taken" flag for the LEBs which were used by the
 * "commit in-the-gaps" method.
 */
static int return_gap_lebs(struct ubifs_info *c)
{
	int *p, err;

	if (!c->gap_lebs)
		return 0;

	dbg_cmt("");
	for (p = c->gap_lebs; *p != -1; p++) {
		err = ubifs_change_one_lp(c, *p, LPROPS_NC, LPROPS_NC, 0,
					  LPROPS_TAKEN, 0);
		if (err)
			return err;
	}

	kfree(c->gap_lebs);
	c->gap_lebs = NULL;
	return 0;
}

/**
 * ubifs_tnc_end_commit - update the TNC for commit end.
 * @c: UBIFS file-system description object
 *
 * This function writes the dirty znodes laid out at commit start, frees the
 * obsolete znodes and returns %0 in case of success and a negative error code
 * in case of failure.
 */
int ubifs_tnc_end_commit(struct ubifs_info *c)
{
	int err;

	if (!c->cnext)
		return 0;

	err = return_gap_lebs(c);
	if (err)
		return err;

	err = write_index(c);
	if (err)
		return err;

	mutex_lock(&c->tnc_mutex);

	dbg_cmt("TNC height is %d", c->zroot.znode->level + 1);

	free_obsolete_znodes(c);

	c->cnext = NULL;
	kfree(c->ilebs);
	c->ilebs = NULL;

	mutex_unlock(&c->tnc_mutex);

	return 0;
}