/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file implements garbage collection. The procedure for garbage collection
 * is different depending on whether a LEB is an index LEB (contains index
 * nodes) or not. For non-index LEBs, garbage collection finds a LEB which
 * contains a lot of dirty space (obsolete nodes), and copies the non-obsolete
 * nodes to the journal, at which point the garbage-collected LEB is free to be
 * reused. For index LEBs, garbage collection marks the non-obsolete index nodes
 * as dirty in the TNC, and after the next commit, the garbage-collected LEB is
 * free to be reused. Garbage collection will cause the number of dirty index
 * nodes to grow, however sufficient space is reserved for the index to ensure
 * the commit never runs out of space.
 *
 * Notes about the dead watermark. In the current UBIFS implementation we assume
 * that LEBs which have less than @c->dead_wm bytes of free + dirty space are
 * full and not worth garbage-collecting. The dead watermark is one min. I/O
 * unit size, or the min. UBIFS node size, whichever is greater. Indeed, the
 * UBIFS garbage collector has to synchronize the GC head's write-buffer before
 * returning, so this is about wasting one min. I/O unit. However, UBIFS GC
 * could actually reclaim even very small pieces of dirty space by
 * garbage-collecting enough dirty LEBs, but we do not bother doing this in
 * this implementation.
 *
 * Notes about the dark watermark. The result of GC work depends on how big the
 * UBIFS nodes GC deals with are. Large nodes make GC waste more space. Indeed,
 * if GC moves data from LEB A to LEB B and the nodes in LEB A are large, GC
 * would have to waste large pieces of free space at the end of LEB B, because
 * nodes from LEB A would not fit. And the worst situation is when all nodes are
 * of maximum size. So the dark watermark is the amount of free + dirty space in
 * a LEB which is guaranteed to be reclaimable. If a LEB has less space, the GC
 * might be unable to reclaim it. So, LEBs with free + dirty greater than the
 * dark watermark are "good" LEBs from GC's point of view. The other LEBs are
 * not so good, and GC takes extra care when moving them.
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/list_sort.h>
#include "ubifs.h"

/*
 * GC may need to move more than one LEB to make progress. The below constants
 * define "soft" and "hard" limits on the number of LEBs the garbage collector
 * may move.
 */
#define SOFT_LEBS_LIMIT 4
#define HARD_LEBS_LIMIT 32

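/*
 * A worked example of the watermarks described above, with made-up numbers:
 * on a flash with a 2048-byte min. I/O unit and a min. node size well below
 * that, the dead watermark is max(2048, min. node size) = 2048 bytes, so any
 * LEB with less than 2KiB of free + dirty space is treated as full and never
 * picked for GC. The dark watermark, by the reasoning above, must cover the
 * worst case where every node has the maximum size, so one would expect it to
 * be on the order of the maximum UBIFS node size.
 */
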
/**
 * switch_gc_head - switch the garbage collection journal head.
 * @c: UBIFS file-system description object
 *
 * This function switches the GC head to the next LEB which is reserved in
 * @c->gc_lnum. Returns %0 in case of success, %-EAGAIN if commit is required,
 * and other negative error codes in case of failure.
 */
static int switch_gc_head(struct ubifs_info *c)
{
	int err, gc_lnum = c->gc_lnum;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert(gc_lnum != -1);
	dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)",
	       wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,
	       c->leb_size - wbuf->offs - wbuf->used);

	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	/*
	 * The GC write-buffer was synchronized, we may safely unmap
	 * 'c->gc_lnum'.
	 */
	err = ubifs_leb_unmap(c, gc_lnum);
	if (err)
		return err;

	err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
	if (err)
		return err;

	c->gc_lnum = -1;
	err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0, UBI_LONGTERM);
	return err;
}

/**
 * data_nodes_cmp - compare 2 data nodes.
 * @priv: UBIFS file-system description object
 * @a: first data node
 * @b: second data node
 *
 * This function compares data nodes @a and @b. Returns %1 if @a has greater
 * inode or block number, and %-1 otherwise.
 */
int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	ino_t inuma, inumb;
	struct ubifs_info *c = priv;
	struct ubifs_scan_node *sa, *sb;

	cond_resched();
	if (a == b)
		return 0;

	sa = list_entry(a, struct ubifs_scan_node, list);
	sb = list_entry(b, struct ubifs_scan_node, list);

	ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY);
	ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY);
	ubifs_assert(sa->type == UBIFS_DATA_NODE);
	ubifs_assert(sb->type == UBIFS_DATA_NODE);

	inuma = key_inum(c, &sa->key);
	inumb = key_inum(c, &sb->key);

	if (inuma == inumb) {
		unsigned int blka = key_block(c, &sa->key);
		unsigned int blkb = key_block(c, &sb->key);

		if (blka <= blkb)
			return -1;
	} else if (inuma <= inumb)
		return -1;

	return 1;
}

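/*
 * Purely as an illustration of the ordering 'data_nodes_cmp()' produces (the
 * inode and block numbers below are made up): after sorting, data nodes end
 * up in an order such as
 *
 *	(inode 15, block 0), (inode 15, block 1), (inode 15, block 7),
 *	(inode 16, block 0), (inode 21, block 3), ...
 *
 * i.e. first by inode number, then by block number within each inode, which
 * is the layout bulk-read benefits from.
 */
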
/**
 * nondata_nodes_cmp - compare 2 non-data nodes.
 * @priv: UBIFS file-system description object
 * @a: first node
 * @b: second node
 *
 * This function compares nodes @a and @b. It makes sure that inode nodes go
 * first and are sorted by length in descending order. Directory entry nodes go
 * after inode nodes and are sorted in ascending hash value order.
 */
int nondata_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	ino_t inuma, inumb;
	struct ubifs_info *c = priv;
	struct ubifs_scan_node *sa, *sb;

	cond_resched();
	if (a == b)
		return 0;

	sa = list_entry(a, struct ubifs_scan_node, list);
	sb = list_entry(b, struct ubifs_scan_node, list);

	ubifs_assert(key_type(c, &sa->key) != UBIFS_DATA_KEY &&
		     key_type(c, &sb->key) != UBIFS_DATA_KEY);
	ubifs_assert(sa->type != UBIFS_DATA_NODE &&
		     sb->type != UBIFS_DATA_NODE);

	/* Inodes go before directory entries */
	if (sa->type == UBIFS_INO_NODE) {
		if (sb->type == UBIFS_INO_NODE)
			return sb->len - sa->len;
		return -1;
	}
	if (sb->type == UBIFS_INO_NODE)
		return 1;

	ubifs_assert(key_type(c, &sa->key) == UBIFS_DENT_KEY ||
		     key_type(c, &sa->key) == UBIFS_XENT_KEY);
	ubifs_assert(key_type(c, &sb->key) == UBIFS_DENT_KEY ||
		     key_type(c, &sb->key) == UBIFS_XENT_KEY);
	ubifs_assert(sa->type == UBIFS_DENT_NODE ||
		     sa->type == UBIFS_XENT_NODE);
	ubifs_assert(sb->type == UBIFS_DENT_NODE ||
		     sb->type == UBIFS_XENT_NODE);

	inuma = key_inum(c, &sa->key);
	inumb = key_inum(c, &sb->key);

	if (inuma == inumb) {
		uint32_t hasha = key_hash(c, &sa->key);
		uint32_t hashb = key_hash(c, &sb->key);

		if (hasha <= hashb)
			return -1;
	} else if (inuma <= inumb)
		return -1;

	return 1;
}

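/*
 * Again purely as an illustration (made-up lengths, parents and hashes): the
 * non-data list ends up ordered like
 *
 *	inode node (len 4096), inode node (len 160),
 *	dent node (parent 5, hash 0x00c1), dent node (parent 5, hash 0x7f00),
 *	dent node (parent 9, hash 0x0004), ...
 *
 * i.e. inode nodes first, largest first, then directory entry nodes grouped
 * by parent inode number and ordered by name hash.
 */
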
/**
 * sort_nodes - sort nodes for GC.
 * @c: UBIFS file-system description object
 * @sleb: describes nodes to sort and contains the result on exit
 * @nondata: contains non-data nodes on exit
 * @min: minimum node size is returned here
 *
 * This function sorts the list of nodes to garbage collect. First of all, it
 * kills obsolete nodes and separates data and non-data nodes into the
 * @sleb->nodes and @nondata lists correspondingly.
 *
 * Data nodes are then sorted in block number order - this is important for
 * bulk-read; data nodes with lower inode number go before data nodes with
 * higher inode number, and data nodes with lower block number go before data
 * nodes with higher block number;
 *
 * Non-data nodes are sorted as follows.
 * o First go inode nodes - they are sorted in descending length order.
 * o Then go directory entry nodes - they are sorted in hash order, which
 *   should supposedly optimize 'readdir()'. Direntry nodes with lower parent
 *   inode number go before direntry nodes with higher parent inode number,
 *   and direntry nodes with lower name hash values go before direntry nodes
 *   with higher name hash values.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		      struct list_head *nondata, int *min)
{
	int err;
	struct ubifs_scan_node *snod, *tmp;

	*min = INT_MAX;

	/* Separate data nodes and non-data nodes */
	list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
		ubifs_assert(snod->type == UBIFS_INO_NODE ||
			     snod->type == UBIFS_DATA_NODE ||
			     snod->type == UBIFS_DENT_NODE ||
			     snod->type == UBIFS_XENT_NODE ||
			     snod->type == UBIFS_TRUN_NODE);

		if (snod->type != UBIFS_INO_NODE &&
		    snod->type != UBIFS_DATA_NODE &&
		    snod->type != UBIFS_DENT_NODE &&
		    snod->type != UBIFS_XENT_NODE) {
			/* Probably truncation node, zap it */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		ubifs_assert(key_type(c, &snod->key) == UBIFS_DATA_KEY ||
			     key_type(c, &snod->key) == UBIFS_INO_KEY ||
			     key_type(c, &snod->key) == UBIFS_DENT_KEY ||
			     key_type(c, &snod->key) == UBIFS_XENT_KEY);

		err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
					 snod->offs, 0);
		if (err < 0)
			return err;

		if (!err) {
			/* The node is obsolete, remove it from the list */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		if (snod->len < *min)
			*min = snod->len;

		if (key_type(c, &snod->key) != UBIFS_DATA_KEY)
			list_move_tail(&snod->list, nondata);
	}

	/* Sort data and non-data nodes */
	list_sort(c, &sleb->nodes, &data_nodes_cmp);
	list_sort(c, nondata, &nondata_nodes_cmp);

	err = dbg_check_data_nodes_order(c, &sleb->nodes);
	if (err)
		return err;
	err = dbg_check_nondata_nodes_order(c, nondata);
	if (err)
		return err;
	return 0;
}

/**
 * move_node - move a node.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to move nodes from
 * @snod: the node to move
 * @wbuf: write-buffer to move node to
 *
 * This function moves node @snod to @wbuf, changes TNC correspondingly, and
 * destroys @snod. Returns zero in case of success and a negative error code in
 * case of failure.
 */
static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		     struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
{
	int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;

	cond_resched();
	err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
	if (err)
		return err;

	err = ubifs_tnc_replace(c, &snod->key, sleb->lnum,
				snod->offs, new_lnum, new_offs,
				snod->len);
	list_del(&snod->list);
	kfree(snod);
	return err;
}

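/*
 * To illustrate 'move_node()' with made-up numbers: if the GC head
 * write-buffer currently points at LEB 17, offset 2048, a node that used to
 * live at LEB 5, offset 512 is appended to that write-buffer and the TNC
 * entry for its key is switched from (5, 512) to (17, 2048). The old copy in
 * LEB 5 thereby becomes dirty space.
 */
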
/**
 * move_nodes - move nodes.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to move nodes from
 *
 * This function moves valid nodes from data LEB described by @sleb to the GC
 * journal head. This function returns zero in case of success, %-EAGAIN if
 * commit is required, and other negative error codes in case of other
 * failures.
 */
static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
{
	int err, min;
	LIST_HEAD(nondata);
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	if (wbuf->lnum == -1) {
		/*
		 * The GC journal head is not set, because it is the first GC
		 * invocation since mount.
		 */
		err = switch_gc_head(c);
		if (err)
			return err;
	}

	err = sort_nodes(c, sleb, &nondata, &min);
	if (err)
		goto out;

	/* Write nodes to their new location. Use the first-fit strategy */
	while (1) {
		int avail;
		struct ubifs_scan_node *snod, *tmp;

		/* Move data nodes */
		list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (snod->len > avail)
				/*
				 * Do not skip data nodes in order to optimize
				 * bulk-read.
				 */
				break;

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		/* Move non-data nodes */
		list_for_each_entry_safe(snod, tmp, &nondata, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (avail < min)
				break;

			if (snod->len > avail) {
				/*
				 * Keep going only if this is an inode with
				 * some data. Otherwise stop and switch the GC
				 * head. IOW, we assume that data-less inode
				 * nodes and direntry nodes are roughly of the
				 * same size.
				 */
				if (key_type(c, &snod->key) == UBIFS_DENT_KEY ||
				    snod->len == UBIFS_INO_NODE_SZ)
					break;
				continue;
			}

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		if (list_empty(&sleb->nodes) && list_empty(&nondata))
			break;

		/*
		 * Waste the rest of the space in the LEB and switch to the
		 * next LEB.
		 */
		err = switch_gc_head(c);
		if (err)
			goto out;
	}

	return 0;

out:
	list_splice_tail(&nondata, &sleb->nodes);
	return err;
}

/**
 * gc_sync_wbufs - sync write-buffers for GC.
 * @c: UBIFS file-system description object
 *
 * We must guarantee that obsoleting nodes are on flash. Unfortunately they may
 * be in a write-buffer instead. That is, a node could be written to a
 * write-buffer, obsoleting another node in a LEB that is GC'd. If that LEB is
 * erased before the write-buffer is sync'd and then there is an unclean
 * unmount, then an existing node is lost. To avoid this, we sync all
 * write-buffers.
 *
 * This function returns %0 on success or a negative error code on failure.
 */
static int gc_sync_wbufs(struct ubifs_info *c)
{
	int err, i;

	for (i = 0; i < c->jhead_cnt; i++) {
		if (i == GCHD)
			continue;
		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
		if (err)
			return err;
	}
	return 0;
}

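/*
 * A concrete (hypothetical) sequence of events the above guards against: node
 * N2 is written to another journal head and obsoletes node N1 sitting in
 * LEB A, but N2 is still only in that head's write-buffer; GC then moves the
 * remaining valid nodes out of LEB A. If LEB A were erased before that
 * write-buffer reached the flash and the system then stopped uncleanly,
 * neither the old N1 nor the new N2 would exist on the media. Syncing all
 * write-buffers before unmapping the GC'd LEB closes this window.
 */
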
/**
 * ubifs_garbage_collect_leb - garbage-collect a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lp: describes the LEB to garbage collect
 *
 * This function garbage-collects an LEB and returns one of the %LEB_FREED,
 * %LEB_RETAINED, etc positive codes in case of success, %-EAGAIN if commit is
 * required, and other negative error codes in case of failures.
 */
int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
	int err = 0, lnum = lp->lnum;

	ubifs_assert(c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
		     c->need_recovery);
	ubifs_assert(c->gc_lnum != lnum);
	ubifs_assert(wbuf->lnum != lnum);

	/*
	 * We scan the entire LEB even though we only really need to scan up to
	 * (c->leb_size - lp->free).
	 */
	sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);

	ubifs_assert(!list_empty(&sleb->nodes));
	snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);

	if (snod->type == UBIFS_IDX_NODE) {
		struct ubifs_gced_idx_leb *idx_gc;

		dbg_gc("indexing LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);
		list_for_each_entry(snod, &sleb->nodes, list) {
			struct ubifs_idx_node *idx = snod->node;
			int level = le16_to_cpu(idx->level);

			ubifs_assert(snod->type == UBIFS_IDX_NODE);
			key_read(c, ubifs_idx_key(c, idx), &snod->key);
			err = ubifs_dirty_idx_node(c, &snod->key, level, lnum,
						   snod->offs);
			if (err)
				goto out;
		}

		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}

		idx_gc->lnum = lnum;
		idx_gc->unmap = 0;
		list_add(&idx_gc->list, &c->idx_gc);

		/*
		 * Don't release the LEB until after the next commit, because
		 * it may contain data which is needed for recovery. So
		 * although we freed this LEB, it will become usable only after
		 * the commit.
		 */
		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0,
					  LPROPS_INDEX, 1);
		if (err)
			goto out;
		err = LEB_FREED_IDX;
	} else {
		dbg_gc("data LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);

		err = move_nodes(c, sleb);
		if (err)
			goto out_inc_seq;

		err = gc_sync_wbufs(c);
		if (err)
			goto out_inc_seq;

		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0);
		if (err)
			goto out_inc_seq;

		/* Allow for races with TNC */
		c->gced_lnum = lnum;
		smp_wmb();
		c->gc_seq += 1;
		smp_wmb();

		if (c->gc_lnum == -1) {
			c->gc_lnum = lnum;
			err = LEB_RETAINED;
		} else {
			err = ubifs_wbuf_sync_nolock(wbuf);
			if (err)
				goto out;

			err = ubifs_leb_unmap(c, lnum);
			if (err)
				goto out;

			err = LEB_FREED;
		}
	}

out:
	ubifs_scan_destroy(sleb);
	return err;

out_inc_seq:
	/* We may have moved at least some nodes so allow for races with TNC */
	c->gced_lnum = lnum;
	smp_wmb();
	c->gc_seq += 1;
	smp_wmb();
	goto out;
}

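/*
 * How a caller might read the positive return codes above (illustrative
 * summary of the function, not a prescription): %LEB_FREED - the LEB has been
 * unmapped and is immediately reusable; %LEB_RETAINED - the LEB was kept as
 * the reserved @c->gc_lnum, so GC made room but did not hand a LEB back;
 * %LEB_FREED_IDX - an index LEB was GC'ed and becomes usable only after the
 * next commit.
 */
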
/**
 * ubifs_garbage_collect - UBIFS garbage collector.
 * @c: UBIFS file-system description object
 * @anyway: do GC even if there are free LEBs
 *
 * This function does out-of-place garbage collection. The return codes are:
 * o positive LEB number if the LEB has been freed and may be used;
 * o %-EAGAIN if the caller has to run commit;
 * o %-ENOSPC if GC failed to make any progress;
 * o other negative error codes in case of other errors.
 *
 * The garbage collector writes data to the journal when GC'ing data LEBs, and
 * just marks indexing nodes dirty when GC'ing indexing LEBs. Thus, at some
 * point a commit may be required. But commit cannot be run from inside GC,
 * because the caller might be holding the commit lock, so %-EAGAIN is returned
 * instead. This error code means that the caller has to run commit, and re-run
 * GC if there is still no free space.
 *
 * There are many reasons why this function may return %-EAGAIN:
 * o the log is full and there is no space to write an LEB reference for
 *   @c->gc_lnum;
 * o the journal is too large and exceeds size limitations;
 * o GC moved indexing LEBs, but they can be used only after the commit;
 * o the shrinker fails to find clean znodes to free and requests the commit;
 * o etc.
 *
 * Note, if the file-system is close to being full, this function may return
 * %-EAGAIN indefinitely, so the caller has to limit the number of
 * re-invocations of the function. E.g., this happens if the limits on the
 * journal size are too tough and GC writes too much to the journal before an
 * LEB is freed. This might also mean that the journal is too large, and the
 * TNC becomes too big, so that the shrinker is constantly called, finds no
 * clean znodes to free, and requests the commit. Well, this may also happen if
 * the journal is all right, but another kernel process consumes too much
 * memory. Anyway, infinite %-EAGAIN may happen, but only in
 * extreme/misconfiguration cases.
 */
int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
{
	int i, err, ret, min_space = c->dead_wm;
	struct ubifs_lprops lp;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert_cmt_locked(c);
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (ubifs_gc_should_commit(c))
		return -EAGAIN;

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		ret = -EROFS;
		goto out_unlock;
	}

	/* We expect the write-buffer to be empty on entry */
	ubifs_assert(!wbuf->used);

	for (i = 0; ; i++) {
		int space_before = c->leb_size - wbuf->offs - wbuf->used;
		int space_after;

		cond_resched();

		/* Give the commit an opportunity to run */
		if (ubifs_gc_should_commit(c)) {
			ret = -EAGAIN;
			break;
		}

		if (i > SOFT_LEBS_LIMIT && !list_empty(&c->idx_gc)) {
			/*
			 * We've done enough iterations. Indexing LEBs were
			 * moved and will be available after the commit.
			 */
			dbg_gc("soft limit, some index LEBs GC'ed, -EAGAIN");
			ubifs_commit_required(c);
			ret = -EAGAIN;
			break;
		}

		if (i > HARD_LEBS_LIMIT) {
			/*
			 * We've moved too many LEBs and have not made
			 * progress, give up.
			 */
			dbg_gc("hard limit, -ENOSPC");
			ret = -ENOSPC;
			break;
		}

		/*
		 * Empty and freeable LEBs can turn up while we waited for
		 * the wbuf lock, or while we have been running GC. In that
		 * case, we should just return one of those instead of
		 * continuing to GC dirty LEBs. Hence we request
		 * 'ubifs_find_dirty_leb()' to return an empty LEB if it can.
		 */
		ret = ubifs_find_dirty_leb(c, &lp, min_space, anyway ? 0 : 1);
		if (ret) {
			if (ret == -ENOSPC)
				dbg_gc("no more dirty LEBs");
			break;
		}

		dbg_gc("found LEB %d: free %d, dirty %d, sum %d "
		       "(min. space %d)", lp.lnum, lp.free, lp.dirty,
		       lp.free + lp.dirty, min_space);

		if (lp.free + lp.dirty == c->leb_size) {
			/* An empty LEB was returned */
			dbg_gc("LEB %d is free, return it", lp.lnum);
			/*
			 * ubifs_find_dirty_leb() doesn't return freeable index
			 * LEBs.
			 */
			ubifs_assert(!(lp.flags & LPROPS_INDEX));
			if (lp.free != c->leb_size) {
				/*
				 * Write buffers must be sync'd before
				 * unmapping freeable LEBs, because one of them
				 * may contain data which obsoletes something
				 * in 'lp.lnum'.
				 */
				ret = gc_sync_wbufs(c);
				if (ret)
					goto out;
				ret = ubifs_change_one_lp(c, lp.lnum,
							  c->leb_size, 0, 0, 0,
							  0);
				if (ret)
					goto out;
			}
			ret = ubifs_leb_unmap(c, lp.lnum);
			if (ret)
				goto out;
			ret = lp.lnum;
			break;
		}

		space_before = c->leb_size - wbuf->offs - wbuf->used;
		if (wbuf->lnum == -1)
			space_before = 0;

		ret = ubifs_garbage_collect_leb(c, &lp);
		if (ret < 0) {
			if (ret == -EAGAIN) {
				/*
				 * This is not an error, so we have to return
				 * the LEB to lprops. But if 'ubifs_return_leb()'
				 * fails, its failure code is propagated to the
				 * caller instead of the original '-EAGAIN'.
				 */
				err = ubifs_return_leb(c, lp.lnum);
				if (err)
					ret = err;
				break;
			}
			goto out;
		}

		if (ret == LEB_FREED) {
			/* An LEB has been freed and is ready for use */
			dbg_gc("LEB %d freed, return", lp.lnum);
			ret = lp.lnum;
			break;
		}

		if (ret == LEB_FREED_IDX) {
			/*
			 * This was an indexing LEB and it cannot be
			 * immediately used. And instead of requesting the
			 * commit straight away, we try to garbage collect some
			 * more.
			 */
			dbg_gc("indexing LEB %d freed, continue", lp.lnum);
			continue;
		}

		ubifs_assert(ret == LEB_RETAINED);
		space_after = c->leb_size - wbuf->offs - wbuf->used;
		dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
		       space_after - space_before);

		if (space_after > space_before) {
			/* GC makes progress, keep working */
			min_space >>= 1;
			if (min_space < c->dead_wm)
				min_space = c->dead_wm;
			continue;
		}

		dbg_gc("did not make progress");

		/*
		 * GC moved an LEB but has not made any progress. This means
		 * that the previous GC head LEB contained too little free
		 * space and the LEB which was GC'ed contained only large
		 * nodes which did not fit that space.
		 *
		 * We can do 2 things:
		 * 1. pick another LEB in the hope it'll contain a small node
		 *    which will fit the space we have at the end of the
		 *    current GC head LEB, but there is no guarantee, so we
		 *    try this out unless we have already been working for too
		 *    long;
		 * 2. request an LEB with more dirty space, which will force
		 *    'ubifs_find_dirty_leb()' to start scanning the lprops
		 *    table, instead of just picking one from the heap
		 *    (previously it already picked the dirtiest LEB).
		 */
		if (i < SOFT_LEBS_LIMIT) {
			dbg_gc("try again");
			continue;
		}

		min_space <<= 1;
		if (min_space > c->dark_wm)
			min_space = c->dark_wm;
		dbg_gc("set min. space to %d", min_space);
	}

	if (ret == -ENOSPC && !list_empty(&c->idx_gc)) {
		dbg_gc("no space, some index LEBs GC'ed, -EAGAIN");
		ubifs_commit_required(c);
		ret = -EAGAIN;
	}

	err = ubifs_wbuf_sync_nolock(wbuf);
	if (!err)
		err = ubifs_leb_unmap(c, c->gc_lnum);
	if (err) {
		ret = err;
		goto out;
	}
out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return ret;

out:
	ubifs_assert(ret < 0);
	ubifs_assert(ret != -ENOSPC && ret != -EAGAIN);
	ubifs_wbuf_sync_nolock(wbuf);
	ubifs_ro_mode(c, ret);
	mutex_unlock(&wbuf->io_mutex);
	ubifs_return_leb(c, lp.lnum);
	return ret;
}

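/*
 * For orientation, a minimal sketch of how a caller might drive the function
 * above (hypothetical helper, not part of UBIFS; real callers such as the
 * budgeting code add their own locking around the commit semaphore and limit
 * the number of retries):
 *
 *	static int make_free_space(struct ubifs_info *c)
 *	{
 *		int lnum;
 *
 *		while (1) {
 *			lnum = ubifs_garbage_collect(c, 0);
 *			if (lnum >= 0)
 *				return 0;        // a LEB was freed
 *			if (lnum != -EAGAIN)
 *				return lnum;     // -ENOSPC or other error
 *			// GC asked for a commit; run it and try again
 *			lnum = ubifs_run_commit(c);
 *			if (lnum)
 *				return lnum;
 *		}
 *	}
 */
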
/**
 * ubifs_gc_start_commit - garbage collection at start of commit.
 * @c: UBIFS file-system description object
 *
 * If a LEB has only dirty and free space, then we may safely unmap it and make
 * it free. Note, we cannot do this with indexing LEBs because dirty space may
 * correspond to index nodes that are required for recovery. In that case, the
 * LEB cannot be unmapped until after the next commit.
 *
 * This function returns %0 upon success and a negative error code upon failure.
 */
int ubifs_gc_start_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	const struct ubifs_lprops *lp;
	int err = 0, flags;

	ubifs_get_lprops(c);

	/*
	 * Unmap (non-index) freeable LEBs. Note that recovery requires that all
	 * wbufs are sync'd before this, which is done in 'do_commit()'.
	 */
	while (1) {
		lp = ubifs_fast_find_freeable(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			goto out;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
	}

	/* Mark GC'd index LEBs OK to unmap after this commit finishes */
	list_for_each_entry(idx_gc, &c->idx_gc, list)
		idx_gc->unmap = 1;

	/* Record index freeable LEBs for unmapping after commit */
	while (1) {
		lp = ubifs_fast_find_frdi_idx(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(lp->flags & LPROPS_INDEX);
		/* Don't release the LEB until after the next commit */
		flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			kfree(idx_gc);
			goto out;
		}
		ubifs_assert(lp->flags & LPROPS_TAKEN);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		idx_gc->lnum = lp->lnum;
		idx_gc->unmap = 1;
		list_add(&idx_gc->list, &c->idx_gc);
	}
out:
	ubifs_release_lprops(c);
	return err;
}

/**
 * ubifs_gc_end_commit - garbage collection at end of commit.
 * @c: UBIFS file-system description object
 *
 * This function completes out-of-place garbage collection of index LEBs.
 */
int ubifs_gc_end_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc, *tmp;
	struct ubifs_wbuf *wbuf;
	int err = 0;

	wbuf = &c->jheads[GCHD].wbuf;
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	list_for_each_entry_safe(idx_gc, tmp, &c->idx_gc, list)
		if (idx_gc->unmap) {
			dbg_gc("LEB %d", idx_gc->lnum);
			err = ubifs_leb_unmap(c, idx_gc->lnum);
			if (err)
				goto out;
			err = ubifs_change_one_lp(c, idx_gc->lnum, LPROPS_NC,
						  LPROPS_NC, 0, LPROPS_TAKEN, -1);
			if (err)
				goto out;
			list_del(&idx_gc->list);
			kfree(idx_gc);
		}
out:
	mutex_unlock(&wbuf->io_mutex);
	return err;
}

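/*
 * Putting the pieces above together, the life of a GC'ed index LEB looks
 * roughly like this: 'ubifs_garbage_collect_leb()' dirties its index nodes in
 * the TNC and puts the LEB on @c->idx_gc with 'unmap = 0';
 * 'ubifs_gc_start_commit()' sets 'unmap = 1' once the commit begins (and also
 * queues freeable index LEBs found via 'ubifs_fast_find_frdi_idx()'); and
 * 'ubifs_gc_end_commit()' finally unmaps those LEBs and drops them from the
 * list once the new index has reached the flash.
 */
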
/**
 * ubifs_destroy_idx_gc - destroy idx_gc list.
 * @c: UBIFS file-system description object
 *
 * This function destroys the @c->idx_gc list. It is called when unmounting,
 * so locks are not needed.
 */
void ubifs_destroy_idx_gc(struct ubifs_info *c)
{
	while (!list_empty(&c->idx_gc)) {
		struct ubifs_gced_idx_leb *idx_gc;

		idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb,
				    list);
		c->idx_gc_cnt -= 1;
		list_del(&idx_gc->list);
		kfree(idx_gc);
	}
}

/**
 * ubifs_get_idx_gc_leb - get a LEB from GC'd index LEB list.
 * @c: UBIFS file-system description object
 *
 * Called during start commit, so locks are not needed. Returns the LEB number
 * upon success and %-ENOSPC if the @c->idx_gc list is empty.
 */
int ubifs_get_idx_gc_leb(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	int lnum;

	if (list_empty(&c->idx_gc))
		return -ENOSPC;
	idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, list);
	lnum = idx_gc->lnum;
	/* c->idx_gc_cnt is updated by the caller when lprops are updated */
	list_del(&idx_gc->list);
	kfree(idx_gc);
	return lnum;
}