/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * SPDX-License-Identifier: GPL-2.0+
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file is a part of UBIFS journal implementation and contains various
 * functions which manipulate the log. The log is a fixed area on the flash
 * which does not contain any data but refers to buds. The log is a part of the
 * journal.
 */

#define __UBOOT__
#ifdef __UBOOT__
#include <linux/err.h>
#endif
#include "ubifs.h"

static int dbg_check_bud_bytes(struct ubifs_info *c);

/**
 * ubifs_search_bud - search bud LEB.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function searches bud LEB @lnum. Returns bud description object in case
 * of success and %NULL if there is no bud with this LEB number.
 */
struct ubifs_bud *ubifs_search_bud(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			spin_unlock(&c->buds_lock);
			return bud;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}

/**
 * ubifs_get_wbuf - get the wbuf associated with a LEB, if there is one.
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number to search
 *
 * This function returns the wbuf for @lnum or %NULL if there is none.
 */
struct ubifs_wbuf *ubifs_get_wbuf(struct ubifs_info *c, int lnum)
{
	struct rb_node *p;
	struct ubifs_bud *bud;
	int jhead;

	if (!c->jheads)
		return NULL;

	spin_lock(&c->buds_lock);
	p = c->buds.rb_node;
	while (p) {
		bud = rb_entry(p, struct ubifs_bud, rb);
		if (lnum < bud->lnum)
			p = p->rb_left;
		else if (lnum > bud->lnum)
			p = p->rb_right;
		else {
			jhead = bud->jhead;
			spin_unlock(&c->buds_lock);
			return &c->jheads[jhead].wbuf;
		}
	}
	spin_unlock(&c->buds_lock);
	return NULL;
}

/**
 * empty_log_bytes - calculate amount of empty space in the log.
 * @c: UBIFS file-system description object
 */
static inline long long empty_log_bytes(const struct ubifs_info *c)
{
	long long h, t;

	h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
	t = (long long)c->ltail_lnum * c->leb_size;

	if (h >= t)
		return c->log_bytes - h + t;
	else
		return t - h;
}
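
/*
 * A worked example of the circular free-space calculation above, with purely
 * illustrative numbers: assume a 128 KiB LEB size and a 4-LEB log
 * (c->log_bytes = 512 KiB) occupying LEBs 3..6, with the tail at LEB 4:0 and
 * the head at LEB 5:4096. Then t = 4 * 128 KiB = 512 KiB and
 * h = 5 * 128 KiB + 4 KiB = 644 KiB. Since h >= t, the used part is
 * h - t = 132 KiB (all of LEB 4 plus 4 KiB of LEB 5), so the empty part is
 * c->log_bytes - h + t = 380 KiB. When the head has wrapped around below the
 * tail, the empty space is simply the gap between them, t - h.
 */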

/**
 * ubifs_add_bud - add bud LEB to the tree of buds and its journal head list.
 * @c: UBIFS file-system description object
 * @bud: the bud to add
 */
void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
	struct rb_node **p, *parent = NULL;
	struct ubifs_bud *b;
	struct ubifs_jhead *jhead;

	spin_lock(&c->buds_lock);
	p = &c->buds.rb_node;
	while (*p) {
		parent = *p;
		b = rb_entry(parent, struct ubifs_bud, rb);
		ubifs_assert(bud->lnum != b->lnum);
		if (bud->lnum < b->lnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&bud->rb, parent, p);
	rb_insert_color(&bud->rb, &c->buds);
	if (c->jheads) {
		jhead = &c->jheads[bud->jhead];
		list_add_tail(&bud->list, &jhead->buds_list);
	} else
		ubifs_assert(c->replaying && c->ro_mount);

	/*
	 * Note, although this is a new bud, we account its space now, before
	 * any data has been written to it, because this is about guaranteeing
	 * a fixed mount time, and this bud will anyway be read and scanned.
	 */
	c->bud_bytes += c->leb_size - bud->start;

	dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
		bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
	spin_unlock(&c->buds_lock);
}

/**
 * ubifs_add_bud_to_log - add a new bud to the log.
 * @c: UBIFS file-system description object
 * @jhead: journal head the bud belongs to
 * @lnum: LEB number of the bud
 * @offs: starting offset of the bud
 *
 * This function writes a reference node for the new bud LEB @lnum to the log,
 * and adds the bud to the tree of buds. It also makes sure that the log size
 * does not exceed the 'c->max_bud_bytes' limit. Returns zero in case of
 * success, %-EAGAIN if a commit is required, and a negative error code in
 * case of failure.
 */
int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
{
	int err;
	struct ubifs_bud *bud;
	struct ubifs_ref_node *ref;

	bud = kmalloc(sizeof(struct ubifs_bud), GFP_NOFS);
	if (!bud)
		return -ENOMEM;
	ref = kzalloc(c->ref_node_alsz, GFP_NOFS);
	if (!ref) {
		kfree(bud);
		return -ENOMEM;
	}

	mutex_lock(&c->log_mutex);
	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error) {
		err = -EROFS;
		goto out_unlock;
	}

	/* Make sure we have enough space in the log */
	if (empty_log_bytes(c) - c->ref_node_alsz < c->min_log_bytes) {
		dbg_log("not enough log space - %lld, required %d",
			empty_log_bytes(c), c->min_log_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * Make sure the amount of space in buds will not exceed the
	 * 'c->max_bud_bytes' limit, because we want to guarantee mount time
	 * limits.
	 *
	 * It is not necessary to hold @c->buds_lock when reading @c->bud_bytes
	 * because we are holding @c->log_mutex. All changes to @c->bud_bytes
	 * take place when both @c->log_mutex and @c->buds_lock are locked.
	 */
	if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) {
		dbg_log("bud bytes %lld (%lld max), require commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_commit_required(c);
		err = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * If the journal is full enough - start background commit. Note, it is
	 * OK to read 'c->cmt_state' without spinlock because integer reads
	 * are atomic in the kernel.
	 */
	if (c->bud_bytes >= c->bg_bud_bytes &&
	    c->cmt_state == COMMIT_RESTING) {
		dbg_log("bud bytes %lld (%lld max), initiate BG commit",
			c->bud_bytes, c->max_bud_bytes);
		ubifs_request_bg_commit(c);
	}

	bud->lnum = lnum;
	bud->start = offs;
	bud->jhead = jhead;

	ref->ch.node_type = UBIFS_REF_NODE;
	ref->lnum = cpu_to_le32(bud->lnum);
	ref->offs = cpu_to_le32(bud->start);
	ref->jhead = cpu_to_le32(jhead);

	if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next log LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out_unlock;
	}

	if (bud->start == 0) {
		/*
		 * Before writing the LEB reference which refers to an empty
		 * LEB to the log, we have to make sure it is mapped, because
		 * otherwise we would risk referring to an LEB with garbage in
		 * case of an unclean reboot, because the target LEB might have
		 * been unmapped, but not yet physically erased.
		 */
		err = ubifs_leb_map(c, bud->lnum);
		if (err)
			goto out_unlock;
	}

	dbg_log("write ref LEB %d:%d",
		c->lhead_lnum, c->lhead_offs);
	err = ubifs_write_node(c, ref, UBIFS_REF_NODE_SZ, c->lhead_lnum,
			       c->lhead_offs);
	if (err)
		goto out_unlock;

	c->lhead_offs += c->ref_node_alsz;

	ubifs_add_bud(c, bud);

	mutex_unlock(&c->log_mutex);
	kfree(ref);
	return 0;

out_unlock:
	mutex_unlock(&c->log_mutex);
	kfree(ref);
	kfree(bud);
	return err;
}
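
/*
 * Sketch of a hypothetical caller, illustrating the %-EAGAIN contract of
 * ubifs_add_bud_to_log() (the real call site lives in the journal code and
 * may differ in detail):
 *
 *	err = ubifs_add_bud_to_log(c, jhead, lnum, 0);
 *	if (err == -EAGAIN) {
 *		err = ubifs_run_commit(c);   <- commit frees log/bud space
 *		... pick another free LEB and retry ...
 *	}
 */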

/**
 * remove_buds - remove used buds.
 * @c: UBIFS file-system description object
 *
 * This function removes used buds from the buds tree. It does not remove the
 * buds which are pointed to by journal heads.
 */
static void remove_buds(struct ubifs_info *c)
{
	struct rb_node *p;

	ubifs_assert(list_empty(&c->old_buds));
	c->cmt_bud_bytes = 0;
	spin_lock(&c->buds_lock);
	p = rb_first(&c->buds);
	while (p) {
		struct rb_node *p1 = p;
		struct ubifs_bud *bud;
		struct ubifs_wbuf *wbuf;

		p = rb_next(p);
		bud = rb_entry(p1, struct ubifs_bud, rb);
		wbuf = &c->jheads[bud->jhead].wbuf;

		if (wbuf->lnum == bud->lnum) {
			/*
			 * Do not remove buds which are pointed to by journal
			 * heads (non-closed buds).
			 */
			c->cmt_bud_bytes += wbuf->offs - bud->start;
			dbg_log("preserve %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
				bud->lnum, bud->start, dbg_jhead(bud->jhead),
				wbuf->offs - bud->start, c->cmt_bud_bytes);
			bud->start = wbuf->offs;
		} else {
			c->cmt_bud_bytes += c->leb_size - bud->start;
			dbg_log("remove %d:%d, jhead %s, bud bytes %d, cmt_bud_bytes %lld",
				bud->lnum, bud->start, dbg_jhead(bud->jhead),
				c->leb_size - bud->start, c->cmt_bud_bytes);
			rb_erase(p1, &c->buds);
			/*
			 * If the commit does not finish, the recovery will need
			 * to replay the journal, in which case the old buds
			 * must be unchanged. Do not release them until post
			 * commit, i.e. do not allow them to be garbage
			 * collected.
			 */
			list_move(&bud->list, &c->old_buds);
		}
	}
	spin_unlock(&c->buds_lock);
}

/**
 * ubifs_log_start_commit - start commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: return new log tail LEB number
 *
 * The commit operation starts with writing a "commit start" node to the log
 * and reference nodes for all journal heads which will define the new journal
 * after the commit has been finished. The commit start and reference nodes are
 * written in one go to the nearest empty log LEB (hence, when the commit is
 * finished UBIFS may safely unmap all the previous log LEBs). This function
 * returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
{
	void *buf;
	struct ubifs_cs_node *cs;
	struct ubifs_ref_node *ref;
	int err, i, max_len, len;

	err = dbg_check_bud_bytes(c);
	if (err)
		return err;

	max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;
	max_len = ALIGN(max_len, c->min_io_size);
	buf = cs = kmalloc(max_len, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	cs->ch.node_type = UBIFS_CS_NODE;
	cs->cmt_no = cpu_to_le64(c->cmt_no);
	ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);

	/*
	 * Note, we do not lock 'c->log_mutex' because this is the commit start
	 * phase and we are exclusively using the log. And we do not lock the
	 * write-buffer because nobody can write to the file-system at this
	 * phase.
	 */

	len = UBIFS_CS_NODE_SZ;
	for (i = 0; i < c->jhead_cnt; i++) {
		int lnum = c->jheads[i].wbuf.lnum;
		int offs = c->jheads[i].wbuf.offs;

		if (lnum == -1 || offs == c->leb_size)
			continue;

		dbg_log("add ref to LEB %d:%d for jhead %s",
			lnum, offs, dbg_jhead(i));
		ref = buf + len;
		ref->ch.node_type = UBIFS_REF_NODE;
		ref->lnum = cpu_to_le32(lnum);
		ref->offs = cpu_to_le32(offs);
		ref->jhead = cpu_to_le32(i);

		ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0);
		len += UBIFS_REF_NODE_SZ;
	}

	ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len);

	/* Switch to the next log LEB */
	if (c->lhead_offs) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	if (c->lhead_offs == 0) {
		/* Must ensure next LEB has been unmapped */
		err = ubifs_leb_unmap(c, c->lhead_lnum);
		if (err)
			goto out;
	}

	len = ALIGN(len, c->min_io_size);
	dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
	err = ubifs_leb_write(c, c->lhead_lnum, cs, 0, len);
	if (err)
		goto out;

	*ltail_lnum = c->lhead_lnum;

	c->lhead_offs += len;
	if (c->lhead_offs == c->leb_size) {
		c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
		c->lhead_offs = 0;
	}

	remove_buds(c);

	/*
	 * We have started the commit and now users may use the rest of the log
	 * for new writes.
	 */
	c->min_log_bytes = 0;

out:
	kfree(buf);
	return err;
}
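
/*
 * Illustrative layout of the commit-start LEB written above (not to scale):
 *
 *	+------------------+---------------+-...-+---------------+---------+
 *	| CS node (cmt_no) | ref (jhead A) | ... | ref (jhead Z) | padding |
 *	+------------------+---------------+-...-+---------------+---------+
 *
 * One reference node is emitted for each journal head whose write-buffer
 * currently points at a bud; the buffer is then padded up to
 * ALIGN(len, c->min_io_size) and written at offset 0 of the new log head LEB.
 */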

/**
 * ubifs_log_end_commit - end commit.
 * @c: UBIFS file-system description object
 * @ltail_lnum: new log tail LEB number
 *
 * This function is called when the commit operation has finished. It moves the
 * log tail to the new position and unmaps LEBs which contain obsolete data.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
{
	int err;

	/*
	 * At this phase we have to lock 'c->log_mutex' because UBIFS allows FS
	 * writes during commit. It is only during the short "commit start"
	 * phase that writers are blocked.
	 */
	mutex_lock(&c->log_mutex);

	dbg_log("old tail was LEB %d:0, new tail is LEB %d:0",
		c->ltail_lnum, ltail_lnum);

	c->ltail_lnum = ltail_lnum;
	/*
	 * The commit is finished and from now on it must be guaranteed that
	 * there is always enough space for the next commit.
	 */
	c->min_log_bytes = c->leb_size;

	spin_lock(&c->buds_lock);
	c->bud_bytes -= c->cmt_bud_bytes;
	spin_unlock(&c->buds_lock);

	err = dbg_check_bud_bytes(c);

	mutex_unlock(&c->log_mutex);
	return err;
}

/**
 * ubifs_log_post_commit - things to do after commit is completed.
 * @c: UBIFS file-system description object
 * @old_ltail_lnum: old log tail LEB number
 *
 * Release buds only after the commit is completed, because they must be
 * unchanged if recovery is needed.
 *
 * Unmap log LEBs only after the commit is completed, because they may be
 * needed for recovery.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum)
{
	int lnum, err = 0;

	while (!list_empty(&c->old_buds)) {
		struct ubifs_bud *bud;

		bud = list_entry(c->old_buds.next, struct ubifs_bud, list);
		err = ubifs_return_leb(c, bud->lnum);
		if (err)
			return err;
		list_del(&bud->list);
		kfree(bud);
	}
	mutex_lock(&c->log_mutex);
	for (lnum = old_ltail_lnum; lnum != c->ltail_lnum;
	     lnum = ubifs_next_log_lnum(c, lnum)) {
		dbg_log("unmap log LEB %d", lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			goto out;
	}
out:
	mutex_unlock(&c->log_mutex);
	return err;
}

/**
 * struct done_ref - references that have been done.
 * @rb: rb-tree node
 * @lnum: LEB number
 */
struct done_ref {
	struct rb_node rb;
	int lnum;
};

/**
 * done_already - determine if a reference has been done already.
 * @done_tree: rb-tree to store references that have been done
 * @lnum: LEB number of reference
 *
 * This function returns %1 if the reference has been done, %0 if not, and
 * otherwise a negative error code is returned.
 */
static int done_already(struct rb_root *done_tree, int lnum)
{
	struct rb_node **p = &done_tree->rb_node, *parent = NULL;
	struct done_ref *dr;

	while (*p) {
		parent = *p;
		dr = rb_entry(parent, struct done_ref, rb);
		if (lnum < dr->lnum)
			p = &(*p)->rb_left;
		else if (lnum > dr->lnum)
			p = &(*p)->rb_right;
		else
			return 1;
	}

	dr = kzalloc(sizeof(struct done_ref), GFP_NOFS);
	if (!dr)
		return -ENOMEM;

	dr->lnum = lnum;

	rb_link_node(&dr->rb, parent, p);
	rb_insert_color(&dr->rb, done_tree);

	return 0;
}
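
/*
 * The return value is deliberately tri-state; the scan loop in
 * ubifs_consolidate_log() below uses it roughly like this:
 *
 *	ret = done_already(&done_tree, ref_lnum);
 *	if (ret < 0)
 *		-> error, bail out;
 *	else if (ret == 0)
 *		-> first reference to this LEB, copy the node;
 *	else
 *		-> duplicate reference, drop it;
 */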

/**
 * destroy_done_tree - destroy the done tree.
 * @done_tree: done tree to destroy
 */
static void destroy_done_tree(struct rb_root *done_tree)
{
	struct done_ref *dr, *n;

	rbtree_postorder_for_each_entry_safe(dr, n, done_tree, rb)
		kfree(dr);
}

/**
 * add_node - add a node to the consolidated log.
 * @c: UBIFS file-system description object
 * @buf: buffer to which to add
 * @lnum: LEB number to which to write is passed and returned here
 * @offs: offset to where to write is passed and returned here
 * @node: node to add
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int add_node(struct ubifs_info *c, void *buf, int *lnum, int *offs,
		    void *node)
{
	struct ubifs_ch *ch = node;
	int len = le32_to_cpu(ch->len), remains = c->leb_size - *offs;

	if (len > remains) {
		int sz = ALIGN(*offs, c->min_io_size), err;

		ubifs_pad(c, buf + *offs, sz - *offs);
		err = ubifs_leb_change(c, *lnum, buf, sz);
		if (err)
			return err;
		*lnum = ubifs_next_log_lnum(c, *lnum);
		*offs = 0;
	}
	memcpy(buf + *offs, node, len);
	*offs += ALIGN(len, 8);
	return 0;
}

/**
 * ubifs_consolidate_log - consolidate the log.
 * @c: UBIFS file-system description object
 *
 * Repeated failed commits could cause the log to be full, but at least 1 LEB is
 * needed for commit. This function rewrites the reference nodes in the log,
 * omitting duplicates and failed CS nodes, and leaving no gaps.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_consolidate_log(struct ubifs_info *c)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct rb_root done_tree = RB_ROOT;
	int lnum, err, first = 1, write_lnum, offs = 0;
	void *buf;

	dbg_rcvry("log tail LEB %d, log head LEB %d", c->ltail_lnum,
		  c->lhead_lnum);
	buf = vmalloc(c->leb_size);
	if (!buf)
		return -ENOMEM;
	lnum = c->ltail_lnum;
	write_lnum = lnum;
	while (1) {
		sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
		if (IS_ERR(sleb)) {
			err = PTR_ERR(sleb);
			goto out_free;
		}
		list_for_each_entry(snod, &sleb->nodes, list) {
			switch (snod->type) {
			case UBIFS_REF_NODE: {
				struct ubifs_ref_node *ref = snod->node;
				int ref_lnum = le32_to_cpu(ref->lnum);

				err = done_already(&done_tree, ref_lnum);
				if (err < 0)
					goto out_scan;
				if (err != 1) {
					err = add_node(c, buf, &write_lnum,
						       &offs, snod->node);
					if (err)
						goto out_scan;
				}
				break;
			}
			case UBIFS_CS_NODE:
				if (!first)
					break;
				err = add_node(c, buf, &write_lnum, &offs,
					       snod->node);
				if (err)
					goto out_scan;
				first = 0;
				break;
			}
		}
		ubifs_scan_destroy(sleb);
		if (lnum == c->lhead_lnum)
			break;
		lnum = ubifs_next_log_lnum(c, lnum);
	}
	if (offs) {
		int sz = ALIGN(offs, c->min_io_size);

		ubifs_pad(c, buf + offs, sz - offs);
		err = ubifs_leb_change(c, write_lnum, buf, sz);
		if (err)
			goto out_free;
		offs = ALIGN(offs, c->min_io_size);
	}
	destroy_done_tree(&done_tree);
	vfree(buf);
	if (write_lnum == c->lhead_lnum) {
		ubifs_err("log is too full");
		return -EINVAL;
	}
	/* Unmap remaining LEBs */
	lnum = write_lnum;
	do {
		lnum = ubifs_next_log_lnum(c, lnum);
		err = ubifs_leb_unmap(c, lnum);
		if (err)
			return err;
	} while (lnum != c->lhead_lnum);
	c->lhead_lnum = write_lnum;
	c->lhead_offs = offs;
	dbg_rcvry("new log head at %d:%d", c->lhead_lnum, c->lhead_offs);
	return 0;

out_scan:
	ubifs_scan_destroy(sleb);
out_free:
	destroy_done_tree(&done_tree);
	vfree(buf);
	return err;
}
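
/*
 * A small, purely illustrative example of the effect of consolidation.
 * Suppose the log spans LEBs 3..5, with ltail_lnum = 3 and lhead_lnum = 5:
 *
 *	before:  LEB 3: CS, ref A, ref B | LEB 4: CS, ref A, ref C | LEB 5: ref B
 *	after:   LEB 3: CS, ref A, ref B, ref C (packed, then padded)
 *
 * The second CS node (from a failed commit) and the duplicate references are
 * dropped, LEBs 4 and 5 are unmapped, and the log head becomes LEB 3 at the
 * end of the packed nodes.
 */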

/**
 * dbg_check_bud_bytes - make sure bud bytes calculations are correct.
 * @c: UBIFS file-system description object
 *
 * This function makes sure the amount of flash space used by closed buds
 * ('c->bud_bytes') is correct. Returns zero in case of success and %-EINVAL in
 * case of failure.
 */
static int dbg_check_bud_bytes(struct ubifs_info *c)
{
	int i, err = 0;
	struct ubifs_bud *bud;
	long long bud_bytes = 0;

	if (!dbg_is_chk_gen(c))
		return 0;

	spin_lock(&c->buds_lock);
	for (i = 0; i < c->jhead_cnt; i++)
		list_for_each_entry(bud, &c->jheads[i].buds_list, list)
			bud_bytes += c->leb_size - bud->start;

	if (c->bud_bytes != bud_bytes) {
		ubifs_err("bad bud_bytes %lld, calculated %lld",
			  c->bud_bytes, bud_bytes);
		err = -EINVAL;
	}
	spin_unlock(&c->buds_lock);

	return err;
}