1 /* 2 * YAFFS: Yet Another Flash File System. A NAND-flash specific file system. 3 * 4 * Copyright (C) 2002-2011 Aleph One Ltd. 5 * for Toby Churchill Ltd and Brightstar Engineering 6 * 7 * Created by Charles Manning <charles@aleph1.co.uk> 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 */ 13 14 #include "yportenv.h" 15 #include "yaffs_trace.h" 16 17 #include "yaffs_guts.h" 18 #include "yaffs_getblockinfo.h" 19 #include "yaffs_tagscompat.h" 20 #include "yaffs_nand.h" 21 #include "yaffs_yaffs1.h" 22 #include "yaffs_yaffs2.h" 23 #include "yaffs_bitmap.h" 24 #include "yaffs_verify.h" 25 #include "yaffs_nand.h" 26 #include "yaffs_packedtags2.h" 27 #include "yaffs_nameval.h" 28 #include "yaffs_allocator.h" 29 #include "yaffs_attribs.h" 30 #include "yaffs_summary.h" 31 32 /* Note YAFFS_GC_GOOD_ENOUGH must be <= YAFFS_GC_PASSIVE_THRESHOLD */ 33 #define YAFFS_GC_GOOD_ENOUGH 2 34 #define YAFFS_GC_PASSIVE_THRESHOLD 4 35 36 #include "yaffs_ecc.h" 37 38 /* Forward declarations */ 39 40 static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk, 41 const u8 *buffer, int n_bytes, int use_reserve); 42 43 44 45 /* Function to calculate chunk and offset */ 46 47 void yaffs_addr_to_chunk(struct yaffs_dev *dev, loff_t addr, 48 int *chunk_out, u32 *offset_out) 49 { 50 int chunk; 51 u32 offset; 52 53 chunk = (u32) (addr >> dev->chunk_shift); 54 55 if (dev->chunk_div == 1) { 56 /* easy power of 2 case */ 57 offset = (u32) (addr & dev->chunk_mask); 58 } else { 59 /* Non power-of-2 case */ 60 61 loff_t chunk_base; 62 63 chunk /= dev->chunk_div; 64 65 chunk_base = ((loff_t) chunk) * dev->data_bytes_per_chunk; 66 offset = (u32) (addr - chunk_base); 67 } 68 69 *chunk_out = chunk; 70 *offset_out = offset; 71 } 72 73 /* Function to return the number of shifts for a power of 2 greater than or 74 * equal to the given number 75 * Note we don't try to cater for all possible numbers and this does not have to 76 * be hellishly efficient. 77 */ 78 79 static inline u32 calc_shifts_ceiling(u32 x) 80 { 81 int extra_bits; 82 int shifts; 83 84 shifts = extra_bits = 0; 85 86 while (x > 1) { 87 if (x & 1) 88 extra_bits++; 89 x >>= 1; 90 shifts++; 91 } 92 93 if (extra_bits) 94 shifts++; 95 96 return shifts; 97 } 98 99 /* Function to return the number of shifts to get a 1 in bit 0 100 */ 101 102 static inline u32 calc_shifts(u32 x) 103 { 104 u32 shifts; 105 106 shifts = 0; 107 108 if (!x) 109 return 0; 110 111 while (!(x & 1)) { 112 x >>= 1; 113 shifts++; 114 } 115 116 return shifts; 117 } 118 119 /* 120 * Temporary buffer manipulations. 121 */ 122 123 static int yaffs_init_tmp_buffers(struct yaffs_dev *dev) 124 { 125 int i; 126 u8 *buf = (u8 *) 1; 127 128 memset(dev->temp_buffer, 0, sizeof(dev->temp_buffer)); 129 130 for (i = 0; buf && i < YAFFS_N_TEMP_BUFFERS; i++) { 131 dev->temp_buffer[i].in_use = 0; 132 buf = kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS); 133 dev->temp_buffer[i].buffer = buf; 134 } 135 136 return buf ? 
YAFFS_OK : YAFFS_FAIL;
}

u8 *yaffs_get_temp_buffer(struct yaffs_dev *dev)
{
	int i;

	dev->temp_in_use++;
	if (dev->temp_in_use > dev->max_temp)
		dev->max_temp = dev->temp_in_use;

	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
		if (dev->temp_buffer[i].in_use == 0) {
			dev->temp_buffer[i].in_use = 1;
			return dev->temp_buffer[i].buffer;
		}
	}

	yaffs_trace(YAFFS_TRACE_BUFFERS, "Out of temp buffers");
	/*
	 * If we got here then we have to allocate an unmanaged one.
	 * This is not good.
	 */

	dev->unmanaged_buffer_allocs++;
	return kmalloc(dev->data_bytes_per_chunk, GFP_NOFS);
}

void yaffs_release_temp_buffer(struct yaffs_dev *dev, u8 *buffer)
{
	int i;

	dev->temp_in_use--;

	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
		if (dev->temp_buffer[i].buffer == buffer) {
			dev->temp_buffer[i].in_use = 0;
			return;
		}
	}

	if (buffer) {
		/* Assume it is an unmanaged one. */
		yaffs_trace(YAFFS_TRACE_BUFFERS,
			"Releasing unmanaged temp buffer");
		kfree(buffer);
		dev->unmanaged_buffer_deallocs++;
	}
}

/*
 * Determine if we have a managed buffer.
 */
int yaffs_is_managed_tmp_buffer(struct yaffs_dev *dev, const u8 *buffer)
{
	int i;

	for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) {
		if (dev->temp_buffer[i].buffer == buffer)
			return 1;
	}

	for (i = 0; i < dev->param.n_caches; i++) {
		if (dev->cache[i].data == buffer)
			return 1;
	}

	if (buffer == dev->checkpt_buffer)
		return 1;

	yaffs_trace(YAFFS_TRACE_ALWAYS,
		"yaffs: unmanaged buffer detected.");
	return 0;
}

/*
 * Functions for robustising. TODO
 */

static void yaffs_handle_chunk_wr_ok(struct yaffs_dev *dev, int nand_chunk,
				     const u8 *data,
				     const struct yaffs_ext_tags *tags)
{
	dev = dev;
	nand_chunk = nand_chunk;
	data = data;
	tags = tags;
}

static void yaffs_handle_chunk_update(struct yaffs_dev *dev, int nand_chunk,
				      const struct yaffs_ext_tags *tags)
{
	dev = dev;
	nand_chunk = nand_chunk;
	tags = tags;
}

void yaffs_handle_chunk_error(struct yaffs_dev *dev,
			      struct yaffs_block_info *bi)
{
	if (!bi->gc_prioritise) {
		bi->gc_prioritise = 1;
		dev->has_pending_prioritised_gc = 1;
		bi->chunk_error_strikes++;

		if (bi->chunk_error_strikes > 3) {
			bi->needs_retiring = 1;	/* Too many strikes, so retire */
			yaffs_trace(YAFFS_TRACE_ALWAYS,
				"yaffs: Block struck out");
		}
	}
}

static void yaffs_handle_chunk_wr_error(struct yaffs_dev *dev, int nand_chunk,
					int erased_ok)
{
	int flash_block = nand_chunk / dev->param.chunks_per_block;
	struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block);

	yaffs_handle_chunk_error(dev, bi);

	if (erased_ok) {
		/* Was an actual write failure,
		 * so mark the block for retirement. */
		bi->needs_retiring = 1;
		yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
			"**>> Block %d needs retiring", flash_block);
	}

	/* Delete the chunk */
	yaffs_chunk_del(dev, nand_chunk, 1, __LINE__);
	yaffs_skip_rest_of_block(dev);
}

/*
 * Verification code
 */

/*
 * Simple hash function.
Needs to have a reasonable spread 280 */ 281 282 static inline int yaffs_hash_fn(int n) 283 { 284 if (n < 0) 285 n = -n; 286 return n % YAFFS_NOBJECT_BUCKETS; 287 } 288 289 /* 290 * Access functions to useful fake objects. 291 * Note that root might have a presence in NAND if permissions are set. 292 */ 293 294 struct yaffs_obj *yaffs_root(struct yaffs_dev *dev) 295 { 296 return dev->root_dir; 297 } 298 299 struct yaffs_obj *yaffs_lost_n_found(struct yaffs_dev *dev) 300 { 301 return dev->lost_n_found; 302 } 303 304 /* 305 * Erased NAND checking functions 306 */ 307 308 int yaffs_check_ff(u8 *buffer, int n_bytes) 309 { 310 /* Horrible, slow implementation */ 311 while (n_bytes--) { 312 if (*buffer != 0xff) 313 return 0; 314 buffer++; 315 } 316 return 1; 317 } 318 319 static int yaffs_check_chunk_erased(struct yaffs_dev *dev, int nand_chunk) 320 { 321 int retval = YAFFS_OK; 322 u8 *data = yaffs_get_temp_buffer(dev); 323 struct yaffs_ext_tags tags; 324 325 yaffs_rd_chunk_tags_nand(dev, nand_chunk, data, &tags); 326 327 if (tags.ecc_result > YAFFS_ECC_RESULT_NO_ERROR) 328 retval = YAFFS_FAIL; 329 330 if (!yaffs_check_ff(data, dev->data_bytes_per_chunk) || 331 tags.chunk_used) { 332 yaffs_trace(YAFFS_TRACE_NANDACCESS, 333 "Chunk %d not erased", nand_chunk); 334 retval = YAFFS_FAIL; 335 } 336 337 yaffs_release_temp_buffer(dev, data); 338 339 return retval; 340 341 } 342 343 static int yaffs_verify_chunk_written(struct yaffs_dev *dev, 344 int nand_chunk, 345 const u8 *data, 346 struct yaffs_ext_tags *tags) 347 { 348 int retval = YAFFS_OK; 349 struct yaffs_ext_tags temp_tags; 350 u8 *buffer = yaffs_get_temp_buffer(dev); 351 352 yaffs_rd_chunk_tags_nand(dev, nand_chunk, buffer, &temp_tags); 353 if (memcmp(buffer, data, dev->data_bytes_per_chunk) || 354 temp_tags.obj_id != tags->obj_id || 355 temp_tags.chunk_id != tags->chunk_id || 356 temp_tags.n_bytes != tags->n_bytes) 357 retval = YAFFS_FAIL; 358 359 yaffs_release_temp_buffer(dev, buffer); 360 361 return retval; 362 } 363 364 365 int yaffs_check_alloc_available(struct yaffs_dev *dev, int n_chunks) 366 { 367 int reserved_chunks; 368 int reserved_blocks = dev->param.n_reserved_blocks; 369 int checkpt_blocks; 370 371 checkpt_blocks = yaffs_calc_checkpt_blocks_required(dev); 372 373 reserved_chunks = 374 (reserved_blocks + checkpt_blocks) * dev->param.chunks_per_block; 375 376 return (dev->n_free_chunks > (reserved_chunks + n_chunks)); 377 } 378 379 static int yaffs_find_alloc_block(struct yaffs_dev *dev) 380 { 381 int i; 382 struct yaffs_block_info *bi; 383 384 if (dev->n_erased_blocks < 1) { 385 /* Hoosterman we've got a problem. 386 * Can't get space to gc 387 */ 388 yaffs_trace(YAFFS_TRACE_ERROR, 389 "yaffs tragedy: no more erased blocks"); 390 391 return -1; 392 } 393 394 /* Find an empty block. 
*/ 395 396 for (i = dev->internal_start_block; i <= dev->internal_end_block; i++) { 397 dev->alloc_block_finder++; 398 if (dev->alloc_block_finder < dev->internal_start_block 399 || dev->alloc_block_finder > dev->internal_end_block) { 400 dev->alloc_block_finder = dev->internal_start_block; 401 } 402 403 bi = yaffs_get_block_info(dev, dev->alloc_block_finder); 404 405 if (bi->block_state == YAFFS_BLOCK_STATE_EMPTY) { 406 bi->block_state = YAFFS_BLOCK_STATE_ALLOCATING; 407 dev->seq_number++; 408 bi->seq_number = dev->seq_number; 409 dev->n_erased_blocks--; 410 yaffs_trace(YAFFS_TRACE_ALLOCATE, 411 "Allocated block %d, seq %d, %d left" , 412 dev->alloc_block_finder, dev->seq_number, 413 dev->n_erased_blocks); 414 return dev->alloc_block_finder; 415 } 416 } 417 418 yaffs_trace(YAFFS_TRACE_ALWAYS, 419 "yaffs tragedy: no more erased blocks, but there should have been %d", 420 dev->n_erased_blocks); 421 422 return -1; 423 } 424 425 static int yaffs_alloc_chunk(struct yaffs_dev *dev, int use_reserver, 426 struct yaffs_block_info **block_ptr) 427 { 428 int ret_val; 429 struct yaffs_block_info *bi; 430 431 if (dev->alloc_block < 0) { 432 /* Get next block to allocate off */ 433 dev->alloc_block = yaffs_find_alloc_block(dev); 434 dev->alloc_page = 0; 435 } 436 437 if (!use_reserver && !yaffs_check_alloc_available(dev, 1)) { 438 /* No space unless we're allowed to use the reserve. */ 439 return -1; 440 } 441 442 if (dev->n_erased_blocks < dev->param.n_reserved_blocks 443 && dev->alloc_page == 0) 444 yaffs_trace(YAFFS_TRACE_ALLOCATE, "Allocating reserve"); 445 446 /* Next page please.... */ 447 if (dev->alloc_block >= 0) { 448 bi = yaffs_get_block_info(dev, dev->alloc_block); 449 450 ret_val = (dev->alloc_block * dev->param.chunks_per_block) + 451 dev->alloc_page; 452 bi->pages_in_use++; 453 yaffs_set_chunk_bit(dev, dev->alloc_block, dev->alloc_page); 454 455 dev->alloc_page++; 456 457 dev->n_free_chunks--; 458 459 /* If the block is full set the state to full */ 460 if (dev->alloc_page >= dev->param.chunks_per_block) { 461 bi->block_state = YAFFS_BLOCK_STATE_FULL; 462 dev->alloc_block = -1; 463 } 464 465 if (block_ptr) 466 *block_ptr = bi; 467 468 return ret_val; 469 } 470 471 yaffs_trace(YAFFS_TRACE_ERROR, 472 "!!!!!!!!! Allocator out !!!!!!!!!!!!!!!!!"); 473 474 return -1; 475 } 476 477 static int yaffs_get_erased_chunks(struct yaffs_dev *dev) 478 { 479 int n; 480 481 n = dev->n_erased_blocks * dev->param.chunks_per_block; 482 483 if (dev->alloc_block > 0) 484 n += (dev->param.chunks_per_block - dev->alloc_page); 485 486 return n; 487 488 } 489 490 /* 491 * yaffs_skip_rest_of_block() skips over the rest of the allocation block 492 * if we don't want to write to it. 493 */ 494 void yaffs_skip_rest_of_block(struct yaffs_dev *dev) 495 { 496 struct yaffs_block_info *bi; 497 498 if (dev->alloc_block > 0) { 499 bi = yaffs_get_block_info(dev, dev->alloc_block); 500 if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING) { 501 bi->block_state = YAFFS_BLOCK_STATE_FULL; 502 dev->alloc_block = -1; 503 } 504 } 505 } 506 507 static int yaffs_write_new_chunk(struct yaffs_dev *dev, 508 const u8 *data, 509 struct yaffs_ext_tags *tags, int use_reserver) 510 { 511 int attempts = 0; 512 int write_ok = 0; 513 int chunk; 514 515 yaffs2_checkpt_invalidate(dev); 516 517 do { 518 struct yaffs_block_info *bi = 0; 519 int erased_ok = 0; 520 521 chunk = yaffs_alloc_chunk(dev, use_reserver, &bi); 522 if (chunk < 0) { 523 /* no space */ 524 break; 525 } 526 527 /* First check this chunk is erased, if it needs 528 * checking. 
The checking policy (unless forced 529 * always on) is as follows: 530 * 531 * Check the first page we try to write in a block. 532 * If the check passes then we don't need to check any 533 * more. If the check fails, we check again... 534 * If the block has been erased, we don't need to check. 535 * 536 * However, if the block has been prioritised for gc, 537 * then we think there might be something odd about 538 * this block and stop using it. 539 * 540 * Rationale: We should only ever see chunks that have 541 * not been erased if there was a partially written 542 * chunk due to power loss. This checking policy should 543 * catch that case with very few checks and thus save a 544 * lot of checks that are most likely not needed. 545 * 546 * Mods to the above 547 * If an erase check fails or the write fails we skip the 548 * rest of the block. 549 */ 550 551 /* let's give it a try */ 552 attempts++; 553 554 if (dev->param.always_check_erased) 555 bi->skip_erased_check = 0; 556 557 if (!bi->skip_erased_check) { 558 erased_ok = yaffs_check_chunk_erased(dev, chunk); 559 if (erased_ok != YAFFS_OK) { 560 yaffs_trace(YAFFS_TRACE_ERROR, 561 "**>> yaffs chunk %d was not erased", 562 chunk); 563 564 /* If not erased, delete this one, 565 * skip rest of block and 566 * try another chunk */ 567 yaffs_chunk_del(dev, chunk, 1, __LINE__); 568 yaffs_skip_rest_of_block(dev); 569 continue; 570 } 571 } 572 573 write_ok = yaffs_wr_chunk_tags_nand(dev, chunk, data, tags); 574 575 if (!bi->skip_erased_check) 576 write_ok = 577 yaffs_verify_chunk_written(dev, chunk, data, tags); 578 579 if (write_ok != YAFFS_OK) { 580 /* Clean up aborted write, skip to next block and 581 * try another chunk */ 582 yaffs_handle_chunk_wr_error(dev, chunk, erased_ok); 583 continue; 584 } 585 586 bi->skip_erased_check = 1; 587 588 /* Copy the data into the robustification buffer */ 589 yaffs_handle_chunk_wr_ok(dev, chunk, data, tags); 590 591 } while (write_ok != YAFFS_OK && 592 (yaffs_wr_attempts <= 0 || attempts <= yaffs_wr_attempts)); 593 594 if (!write_ok) 595 chunk = -1; 596 597 if (attempts > 1) { 598 yaffs_trace(YAFFS_TRACE_ERROR, 599 "**>> yaffs write required %d attempts", 600 attempts); 601 dev->n_retried_writes += (attempts - 1); 602 } 603 604 return chunk; 605 } 606 607 /* 608 * Block retiring for handling a broken block. 
609 */ 610 611 static void yaffs_retire_block(struct yaffs_dev *dev, int flash_block) 612 { 613 struct yaffs_block_info *bi = yaffs_get_block_info(dev, flash_block); 614 615 yaffs2_checkpt_invalidate(dev); 616 617 yaffs2_clear_oldest_dirty_seq(dev, bi); 618 619 if (yaffs_mark_bad(dev, flash_block) != YAFFS_OK) { 620 if (yaffs_erase_block(dev, flash_block) != YAFFS_OK) { 621 yaffs_trace(YAFFS_TRACE_ALWAYS, 622 "yaffs: Failed to mark bad and erase block %d", 623 flash_block); 624 } else { 625 struct yaffs_ext_tags tags; 626 int chunk_id = 627 flash_block * dev->param.chunks_per_block; 628 629 u8 *buffer = yaffs_get_temp_buffer(dev); 630 631 memset(buffer, 0xff, dev->data_bytes_per_chunk); 632 memset(&tags, 0, sizeof(tags)); 633 tags.seq_number = YAFFS_SEQUENCE_BAD_BLOCK; 634 if (dev->param.write_chunk_tags_fn(dev, chunk_id - 635 dev->chunk_offset, 636 buffer, 637 &tags) != YAFFS_OK) 638 yaffs_trace(YAFFS_TRACE_ALWAYS, 639 "yaffs: Failed to write bad block marker to block %d", 640 flash_block); 641 642 yaffs_release_temp_buffer(dev, buffer); 643 } 644 } 645 646 bi->block_state = YAFFS_BLOCK_STATE_DEAD; 647 bi->gc_prioritise = 0; 648 bi->needs_retiring = 0; 649 650 dev->n_retired_blocks++; 651 } 652 653 /*---------------- Name handling functions ------------*/ 654 655 static u16 yaffs_calc_name_sum(const YCHAR *name) 656 { 657 u16 sum = 0; 658 u16 i = 1; 659 660 if (!name) 661 return 0; 662 663 while ((*name) && i < (YAFFS_MAX_NAME_LENGTH / 2)) { 664 665 /* 0x1f mask is case insensitive */ 666 sum += ((*name) & 0x1f) * i; 667 i++; 668 name++; 669 } 670 return sum; 671 } 672 673 void yaffs_set_obj_name(struct yaffs_obj *obj, const YCHAR * name) 674 { 675 memset(obj->short_name, 0, sizeof(obj->short_name)); 676 if (name && 677 yaffs_strnlen(name, YAFFS_SHORT_NAME_LENGTH + 1) <= 678 YAFFS_SHORT_NAME_LENGTH) 679 yaffs_strcpy(obj->short_name, name); 680 else 681 obj->short_name[0] = _Y('\0'); 682 obj->sum = yaffs_calc_name_sum(name); 683 } 684 685 void yaffs_set_obj_name_from_oh(struct yaffs_obj *obj, 686 const struct yaffs_obj_hdr *oh) 687 { 688 #ifdef CONFIG_YAFFS_AUTO_UNICODE 689 YCHAR tmp_name[YAFFS_MAX_NAME_LENGTH + 1]; 690 memset(tmp_name, 0, sizeof(tmp_name)); 691 yaffs_load_name_from_oh(obj->my_dev, tmp_name, oh->name, 692 YAFFS_MAX_NAME_LENGTH + 1); 693 yaffs_set_obj_name(obj, tmp_name); 694 #else 695 yaffs_set_obj_name(obj, oh->name); 696 #endif 697 } 698 699 loff_t yaffs_max_file_size(struct yaffs_dev *dev) 700 { 701 return ((loff_t) YAFFS_MAX_CHUNK_ID) * dev->data_bytes_per_chunk; 702 } 703 704 /*-------------------- TNODES ------------------- 705 706 * List of spare tnodes 707 * The list is hooked together using the first pointer 708 * in the tnode. 
709 */ 710 711 struct yaffs_tnode *yaffs_get_tnode(struct yaffs_dev *dev) 712 { 713 struct yaffs_tnode *tn = yaffs_alloc_raw_tnode(dev); 714 715 if (tn) { 716 memset(tn, 0, dev->tnode_size); 717 dev->n_tnodes++; 718 } 719 720 dev->checkpoint_blocks_required = 0; /* force recalculation */ 721 722 return tn; 723 } 724 725 /* FreeTnode frees up a tnode and puts it back on the free list */ 726 static void yaffs_free_tnode(struct yaffs_dev *dev, struct yaffs_tnode *tn) 727 { 728 yaffs_free_raw_tnode(dev, tn); 729 dev->n_tnodes--; 730 dev->checkpoint_blocks_required = 0; /* force recalculation */ 731 } 732 733 static void yaffs_deinit_tnodes_and_objs(struct yaffs_dev *dev) 734 { 735 yaffs_deinit_raw_tnodes_and_objs(dev); 736 dev->n_obj = 0; 737 dev->n_tnodes = 0; 738 } 739 740 void yaffs_load_tnode_0(struct yaffs_dev *dev, struct yaffs_tnode *tn, 741 unsigned pos, unsigned val) 742 { 743 u32 *map = (u32 *) tn; 744 u32 bit_in_map; 745 u32 bit_in_word; 746 u32 word_in_map; 747 u32 mask; 748 749 pos &= YAFFS_TNODES_LEVEL0_MASK; 750 val >>= dev->chunk_grp_bits; 751 752 bit_in_map = pos * dev->tnode_width; 753 word_in_map = bit_in_map / 32; 754 bit_in_word = bit_in_map & (32 - 1); 755 756 mask = dev->tnode_mask << bit_in_word; 757 758 map[word_in_map] &= ~mask; 759 map[word_in_map] |= (mask & (val << bit_in_word)); 760 761 if (dev->tnode_width > (32 - bit_in_word)) { 762 bit_in_word = (32 - bit_in_word); 763 word_in_map++; 764 mask = 765 dev->tnode_mask >> bit_in_word; 766 map[word_in_map] &= ~mask; 767 map[word_in_map] |= (mask & (val >> bit_in_word)); 768 } 769 } 770 771 u32 yaffs_get_group_base(struct yaffs_dev *dev, struct yaffs_tnode *tn, 772 unsigned pos) 773 { 774 u32 *map = (u32 *) tn; 775 u32 bit_in_map; 776 u32 bit_in_word; 777 u32 word_in_map; 778 u32 val; 779 780 pos &= YAFFS_TNODES_LEVEL0_MASK; 781 782 bit_in_map = pos * dev->tnode_width; 783 word_in_map = bit_in_map / 32; 784 bit_in_word = bit_in_map & (32 - 1); 785 786 val = map[word_in_map] >> bit_in_word; 787 788 if (dev->tnode_width > (32 - bit_in_word)) { 789 bit_in_word = (32 - bit_in_word); 790 word_in_map++; 791 val |= (map[word_in_map] << bit_in_word); 792 } 793 794 val &= dev->tnode_mask; 795 val <<= dev->chunk_grp_bits; 796 797 return val; 798 } 799 800 /* ------------------- End of individual tnode manipulation -----------------*/ 801 802 /* ---------Functions to manipulate the look-up tree (made up of tnodes) ------ 803 * The look up tree is represented by the top tnode and the number of top_level 804 * in the tree. 0 means only the level 0 tnode is in the tree. 805 */ 806 807 /* FindLevel0Tnode finds the level 0 tnode, if one exists. 
*/ 808 struct yaffs_tnode *yaffs_find_tnode_0(struct yaffs_dev *dev, 809 struct yaffs_file_var *file_struct, 810 u32 chunk_id) 811 { 812 struct yaffs_tnode *tn = file_struct->top; 813 u32 i; 814 int required_depth; 815 int level = file_struct->top_level; 816 817 dev = dev; 818 819 /* Check sane level and chunk Id */ 820 if (level < 0 || level > YAFFS_TNODES_MAX_LEVEL) 821 return NULL; 822 823 if (chunk_id > YAFFS_MAX_CHUNK_ID) 824 return NULL; 825 826 /* First check we're tall enough (ie enough top_level) */ 827 828 i = chunk_id >> YAFFS_TNODES_LEVEL0_BITS; 829 required_depth = 0; 830 while (i) { 831 i >>= YAFFS_TNODES_INTERNAL_BITS; 832 required_depth++; 833 } 834 835 if (required_depth > file_struct->top_level) 836 return NULL; /* Not tall enough, so we can't find it */ 837 838 /* Traverse down to level 0 */ 839 while (level > 0 && tn) { 840 tn = tn->internal[(chunk_id >> 841 (YAFFS_TNODES_LEVEL0_BITS + 842 (level - 1) * 843 YAFFS_TNODES_INTERNAL_BITS)) & 844 YAFFS_TNODES_INTERNAL_MASK]; 845 level--; 846 } 847 848 return tn; 849 } 850 851 /* add_find_tnode_0 finds the level 0 tnode if it exists, 852 * otherwise first expands the tree. 853 * This happens in two steps: 854 * 1. If the tree isn't tall enough, then make it taller. 855 * 2. Scan down the tree towards the level 0 tnode adding tnodes if required. 856 * 857 * Used when modifying the tree. 858 * 859 * If the tn argument is NULL, then a fresh tnode will be added otherwise the 860 * specified tn will be plugged into the ttree. 861 */ 862 863 struct yaffs_tnode *yaffs_add_find_tnode_0(struct yaffs_dev *dev, 864 struct yaffs_file_var *file_struct, 865 u32 chunk_id, 866 struct yaffs_tnode *passed_tn) 867 { 868 int required_depth; 869 int i; 870 int l; 871 struct yaffs_tnode *tn; 872 u32 x; 873 874 /* Check sane level and page Id */ 875 if (file_struct->top_level < 0 || 876 file_struct->top_level > YAFFS_TNODES_MAX_LEVEL) 877 return NULL; 878 879 if (chunk_id > YAFFS_MAX_CHUNK_ID) 880 return NULL; 881 882 /* First check we're tall enough (ie enough top_level) */ 883 884 x = chunk_id >> YAFFS_TNODES_LEVEL0_BITS; 885 required_depth = 0; 886 while (x) { 887 x >>= YAFFS_TNODES_INTERNAL_BITS; 888 required_depth++; 889 } 890 891 if (required_depth > file_struct->top_level) { 892 /* Not tall enough, gotta make the tree taller */ 893 for (i = file_struct->top_level; i < required_depth; i++) { 894 895 tn = yaffs_get_tnode(dev); 896 897 if (tn) { 898 tn->internal[0] = file_struct->top; 899 file_struct->top = tn; 900 file_struct->top_level++; 901 } else { 902 yaffs_trace(YAFFS_TRACE_ERROR, 903 "yaffs: no more tnodes"); 904 return NULL; 905 } 906 } 907 } 908 909 /* Traverse down to level 0, adding anything we need */ 910 911 l = file_struct->top_level; 912 tn = file_struct->top; 913 914 if (l > 0) { 915 while (l > 0 && tn) { 916 x = (chunk_id >> 917 (YAFFS_TNODES_LEVEL0_BITS + 918 (l - 1) * YAFFS_TNODES_INTERNAL_BITS)) & 919 YAFFS_TNODES_INTERNAL_MASK; 920 921 if ((l > 1) && !tn->internal[x]) { 922 /* Add missing non-level-zero tnode */ 923 tn->internal[x] = yaffs_get_tnode(dev); 924 if (!tn->internal[x]) 925 return NULL; 926 } else if (l == 1) { 927 /* Looking from level 1 at level 0 */ 928 if (passed_tn) { 929 /* If we already have one, release it */ 930 if (tn->internal[x]) 931 yaffs_free_tnode(dev, 932 tn->internal[x]); 933 tn->internal[x] = passed_tn; 934 935 } else if (!tn->internal[x]) { 936 /* Don't have one, none passed in */ 937 tn->internal[x] = yaffs_get_tnode(dev); 938 if (!tn->internal[x]) 939 return NULL; 940 } 941 } 942 943 tn = 
tn->internal[x]; 944 l--; 945 } 946 } else { 947 /* top is level 0 */ 948 if (passed_tn) { 949 memcpy(tn, passed_tn, 950 (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8); 951 yaffs_free_tnode(dev, passed_tn); 952 } 953 } 954 955 return tn; 956 } 957 958 static int yaffs_tags_match(const struct yaffs_ext_tags *tags, int obj_id, 959 int chunk_obj) 960 { 961 return (tags->chunk_id == chunk_obj && 962 tags->obj_id == obj_id && 963 !tags->is_deleted) ? 1 : 0; 964 965 } 966 967 static int yaffs_find_chunk_in_group(struct yaffs_dev *dev, int the_chunk, 968 struct yaffs_ext_tags *tags, int obj_id, 969 int inode_chunk) 970 { 971 int j; 972 973 for (j = 0; the_chunk && j < dev->chunk_grp_size; j++) { 974 if (yaffs_check_chunk_bit 975 (dev, the_chunk / dev->param.chunks_per_block, 976 the_chunk % dev->param.chunks_per_block)) { 977 978 if (dev->chunk_grp_size == 1) 979 return the_chunk; 980 else { 981 yaffs_rd_chunk_tags_nand(dev, the_chunk, NULL, 982 tags); 983 if (yaffs_tags_match(tags, 984 obj_id, inode_chunk)) { 985 /* found it; */ 986 return the_chunk; 987 } 988 } 989 } 990 the_chunk++; 991 } 992 return -1; 993 } 994 995 static int yaffs_find_chunk_in_file(struct yaffs_obj *in, int inode_chunk, 996 struct yaffs_ext_tags *tags) 997 { 998 /*Get the Tnode, then get the level 0 offset chunk offset */ 999 struct yaffs_tnode *tn; 1000 int the_chunk = -1; 1001 struct yaffs_ext_tags local_tags; 1002 int ret_val = -1; 1003 struct yaffs_dev *dev = in->my_dev; 1004 1005 if (!tags) { 1006 /* Passed a NULL, so use our own tags space */ 1007 tags = &local_tags; 1008 } 1009 1010 tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk); 1011 1012 if (!tn) 1013 return ret_val; 1014 1015 the_chunk = yaffs_get_group_base(dev, tn, inode_chunk); 1016 1017 ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id, 1018 inode_chunk); 1019 return ret_val; 1020 } 1021 1022 static int yaffs_find_del_file_chunk(struct yaffs_obj *in, int inode_chunk, 1023 struct yaffs_ext_tags *tags) 1024 { 1025 /* Get the Tnode, then get the level 0 offset chunk offset */ 1026 struct yaffs_tnode *tn; 1027 int the_chunk = -1; 1028 struct yaffs_ext_tags local_tags; 1029 struct yaffs_dev *dev = in->my_dev; 1030 int ret_val = -1; 1031 1032 if (!tags) { 1033 /* Passed a NULL, so use our own tags space */ 1034 tags = &local_tags; 1035 } 1036 1037 tn = yaffs_find_tnode_0(dev, &in->variant.file_variant, inode_chunk); 1038 1039 if (!tn) 1040 return ret_val; 1041 1042 the_chunk = yaffs_get_group_base(dev, tn, inode_chunk); 1043 1044 ret_val = yaffs_find_chunk_in_group(dev, the_chunk, tags, in->obj_id, 1045 inode_chunk); 1046 1047 /* Delete the entry in the filestructure (if found) */ 1048 if (ret_val != -1) 1049 yaffs_load_tnode_0(dev, tn, inode_chunk, 0); 1050 1051 return ret_val; 1052 } 1053 1054 int yaffs_put_chunk_in_file(struct yaffs_obj *in, int inode_chunk, 1055 int nand_chunk, int in_scan) 1056 { 1057 /* NB in_scan is zero unless scanning. 1058 * For forward scanning, in_scan is > 0; 1059 * for backward scanning in_scan is < 0 1060 * 1061 * nand_chunk = 0 is a dummy insert to make sure the tnodes are there. 1062 */ 1063 1064 struct yaffs_tnode *tn; 1065 struct yaffs_dev *dev = in->my_dev; 1066 int existing_cunk; 1067 struct yaffs_ext_tags existing_tags; 1068 struct yaffs_ext_tags new_tags; 1069 unsigned existing_serial, new_serial; 1070 1071 if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) { 1072 /* Just ignore an attempt at putting a chunk into a non-file 1073 * during scanning. 
1074 * If it is not during Scanning then something went wrong! 1075 */ 1076 if (!in_scan) { 1077 yaffs_trace(YAFFS_TRACE_ERROR, 1078 "yaffs tragedy:attempt to put data chunk into a non-file" 1079 ); 1080 BUG(); 1081 } 1082 1083 yaffs_chunk_del(dev, nand_chunk, 1, __LINE__); 1084 return YAFFS_OK; 1085 } 1086 1087 tn = yaffs_add_find_tnode_0(dev, 1088 &in->variant.file_variant, 1089 inode_chunk, NULL); 1090 if (!tn) 1091 return YAFFS_FAIL; 1092 1093 if (!nand_chunk) 1094 /* Dummy insert, bail now */ 1095 return YAFFS_OK; 1096 1097 existing_cunk = yaffs_get_group_base(dev, tn, inode_chunk); 1098 1099 if (in_scan != 0) { 1100 /* If we're scanning then we need to test for duplicates 1101 * NB This does not need to be efficient since it should only 1102 * happen when the power fails during a write, then only one 1103 * chunk should ever be affected. 1104 * 1105 * Correction for YAFFS2: This could happen quite a lot and we 1106 * need to think about efficiency! TODO 1107 * Update: For backward scanning we don't need to re-read tags 1108 * so this is quite cheap. 1109 */ 1110 1111 if (existing_cunk > 0) { 1112 /* NB Right now existing chunk will not be real 1113 * chunk_id if the chunk group size > 1 1114 * thus we have to do a FindChunkInFile to get the 1115 * real chunk id. 1116 * 1117 * We have a duplicate now we need to decide which 1118 * one to use: 1119 * 1120 * Backwards scanning YAFFS2: The old one is what 1121 * we use, dump the new one. 1122 * YAFFS1: Get both sets of tags and compare serial 1123 * numbers. 1124 */ 1125 1126 if (in_scan > 0) { 1127 /* Only do this for forward scanning */ 1128 yaffs_rd_chunk_tags_nand(dev, 1129 nand_chunk, 1130 NULL, &new_tags); 1131 1132 /* Do a proper find */ 1133 existing_cunk = 1134 yaffs_find_chunk_in_file(in, inode_chunk, 1135 &existing_tags); 1136 } 1137 1138 if (existing_cunk <= 0) { 1139 /*Hoosterman - how did this happen? */ 1140 1141 yaffs_trace(YAFFS_TRACE_ERROR, 1142 "yaffs tragedy: existing chunk < 0 in scan" 1143 ); 1144 1145 } 1146 1147 /* NB The deleted flags should be false, otherwise 1148 * the chunks will not be loaded during a scan 1149 */ 1150 1151 if (in_scan > 0) { 1152 new_serial = new_tags.serial_number; 1153 existing_serial = existing_tags.serial_number; 1154 } 1155 1156 if ((in_scan > 0) && 1157 (existing_cunk <= 0 || 1158 ((existing_serial + 1) & 3) == new_serial)) { 1159 /* Forward scanning. 
1160 * Use new 1161 * Delete the old one and drop through to 1162 * update the tnode 1163 */ 1164 yaffs_chunk_del(dev, existing_cunk, 1, 1165 __LINE__); 1166 } else { 1167 /* Backward scanning or we want to use the 1168 * existing one 1169 * Delete the new one and return early so that 1170 * the tnode isn't changed 1171 */ 1172 yaffs_chunk_del(dev, nand_chunk, 1, __LINE__); 1173 return YAFFS_OK; 1174 } 1175 } 1176 1177 } 1178 1179 if (existing_cunk == 0) 1180 in->n_data_chunks++; 1181 1182 yaffs_load_tnode_0(dev, tn, inode_chunk, nand_chunk); 1183 1184 return YAFFS_OK; 1185 } 1186 1187 static void yaffs_soft_del_chunk(struct yaffs_dev *dev, int chunk) 1188 { 1189 struct yaffs_block_info *the_block; 1190 unsigned block_no; 1191 1192 yaffs_trace(YAFFS_TRACE_DELETION, "soft delete chunk %d", chunk); 1193 1194 block_no = chunk / dev->param.chunks_per_block; 1195 the_block = yaffs_get_block_info(dev, block_no); 1196 if (the_block) { 1197 the_block->soft_del_pages++; 1198 dev->n_free_chunks++; 1199 yaffs2_update_oldest_dirty_seq(dev, block_no, the_block); 1200 } 1201 } 1202 1203 /* SoftDeleteWorker scans backwards through the tnode tree and soft deletes all 1204 * the chunks in the file. 1205 * All soft deleting does is increment the block's softdelete count and pulls 1206 * the chunk out of the tnode. 1207 * Thus, essentially this is the same as DeleteWorker except that the chunks 1208 * are soft deleted. 1209 */ 1210 1211 static int yaffs_soft_del_worker(struct yaffs_obj *in, struct yaffs_tnode *tn, 1212 u32 level, int chunk_offset) 1213 { 1214 int i; 1215 int the_chunk; 1216 int all_done = 1; 1217 struct yaffs_dev *dev = in->my_dev; 1218 1219 if (!tn) 1220 return 1; 1221 1222 if (level > 0) { 1223 for (i = YAFFS_NTNODES_INTERNAL - 1; 1224 all_done && i >= 0; 1225 i--) { 1226 if (tn->internal[i]) { 1227 all_done = 1228 yaffs_soft_del_worker(in, 1229 tn->internal[i], 1230 level - 1, 1231 (chunk_offset << 1232 YAFFS_TNODES_INTERNAL_BITS) 1233 + i); 1234 if (all_done) { 1235 yaffs_free_tnode(dev, 1236 tn->internal[i]); 1237 tn->internal[i] = NULL; 1238 } else { 1239 /* Can this happen? */ 1240 } 1241 } 1242 } 1243 return (all_done) ? 
1 : 0; 1244 } 1245 1246 /* level 0 */ 1247 for (i = YAFFS_NTNODES_LEVEL0 - 1; i >= 0; i--) { 1248 the_chunk = yaffs_get_group_base(dev, tn, i); 1249 if (the_chunk) { 1250 yaffs_soft_del_chunk(dev, the_chunk); 1251 yaffs_load_tnode_0(dev, tn, i, 0); 1252 } 1253 } 1254 return 1; 1255 } 1256 1257 static void yaffs_remove_obj_from_dir(struct yaffs_obj *obj) 1258 { 1259 struct yaffs_dev *dev = obj->my_dev; 1260 struct yaffs_obj *parent; 1261 1262 yaffs_verify_obj_in_dir(obj); 1263 parent = obj->parent; 1264 1265 yaffs_verify_dir(parent); 1266 1267 if (dev && dev->param.remove_obj_fn) 1268 dev->param.remove_obj_fn(obj); 1269 1270 list_del_init(&obj->siblings); 1271 obj->parent = NULL; 1272 1273 yaffs_verify_dir(parent); 1274 } 1275 1276 void yaffs_add_obj_to_dir(struct yaffs_obj *directory, struct yaffs_obj *obj) 1277 { 1278 if (!directory) { 1279 yaffs_trace(YAFFS_TRACE_ALWAYS, 1280 "tragedy: Trying to add an object to a null pointer directory" 1281 ); 1282 BUG(); 1283 return; 1284 } 1285 if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { 1286 yaffs_trace(YAFFS_TRACE_ALWAYS, 1287 "tragedy: Trying to add an object to a non-directory" 1288 ); 1289 BUG(); 1290 } 1291 1292 if (obj->siblings.prev == NULL) { 1293 /* Not initialised */ 1294 BUG(); 1295 } 1296 1297 yaffs_verify_dir(directory); 1298 1299 yaffs_remove_obj_from_dir(obj); 1300 1301 /* Now add it */ 1302 list_add(&obj->siblings, &directory->variant.dir_variant.children); 1303 obj->parent = directory; 1304 1305 if (directory == obj->my_dev->unlinked_dir 1306 || directory == obj->my_dev->del_dir) { 1307 obj->unlinked = 1; 1308 obj->my_dev->n_unlinked_files++; 1309 obj->rename_allowed = 0; 1310 } 1311 1312 yaffs_verify_dir(directory); 1313 yaffs_verify_obj_in_dir(obj); 1314 } 1315 1316 static int yaffs_change_obj_name(struct yaffs_obj *obj, 1317 struct yaffs_obj *new_dir, 1318 const YCHAR *new_name, int force, int shadows) 1319 { 1320 int unlink_op; 1321 int del_op; 1322 struct yaffs_obj *existing_target; 1323 1324 if (new_dir == NULL) 1325 new_dir = obj->parent; /* use the old directory */ 1326 1327 if (new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { 1328 yaffs_trace(YAFFS_TRACE_ALWAYS, 1329 "tragedy: yaffs_change_obj_name: new_dir is not a directory" 1330 ); 1331 BUG(); 1332 } 1333 1334 unlink_op = (new_dir == obj->my_dev->unlinked_dir); 1335 del_op = (new_dir == obj->my_dev->del_dir); 1336 1337 existing_target = yaffs_find_by_name(new_dir, new_name); 1338 1339 /* If the object is a file going into the unlinked directory, 1340 * then it is OK to just stuff it in since duplicate names are OK. 1341 * else only proceed if the new name does not exist and we're putting 1342 * it into a directory. 1343 */ 1344 if (!(unlink_op || del_op || force || 1345 shadows > 0 || !existing_target) || 1346 new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) 1347 return YAFFS_FAIL; 1348 1349 yaffs_set_obj_name(obj, new_name); 1350 obj->dirty = 1; 1351 yaffs_add_obj_to_dir(new_dir, obj); 1352 1353 if (unlink_op) 1354 obj->unlinked = 1; 1355 1356 /* If it is a deletion then we mark it as a shrink for gc */ 1357 if (yaffs_update_oh(obj, new_name, 0, del_op, shadows, NULL) >= 0) 1358 return YAFFS_OK; 1359 1360 return YAFFS_FAIL; 1361 } 1362 1363 /*------------------------ Short Operations Cache ------------------------------ 1364 * In many situations where there is no high level buffering a lot of 1365 * reads might be short sequential reads, and a lot of writes may be short 1366 * sequential writes. eg. scanning/writing a jpeg file. 
 * In these cases, a short read/write cache can provide a huge performance
 * benefit with dumb-as-a-rock code.
 * In Linux, the page cache provides read buffering and the short op cache
 * provides write buffering.
 *
 * There are a small number (~10) of cache chunks per device so that we don't
 * need a very intelligent search.
 */

static int yaffs_obj_cache_dirty(struct yaffs_obj *obj)
{
	struct yaffs_dev *dev = obj->my_dev;
	int i;
	struct yaffs_cache *cache;
	int n_caches = obj->my_dev->param.n_caches;

	for (i = 0; i < n_caches; i++) {
		cache = &dev->cache[i];
		if (cache->object == obj && cache->dirty)
			return 1;
	}

	return 0;
}

static void yaffs_flush_file_cache(struct yaffs_obj *obj)
{
	struct yaffs_dev *dev = obj->my_dev;
	int lowest = -99;	/* Stop compiler whining. */
	int i;
	struct yaffs_cache *cache;
	int chunk_written = 0;
	int n_caches = obj->my_dev->param.n_caches;

	if (n_caches < 1)
		return;
	do {
		cache = NULL;

		/* Find the lowest dirty chunk for this object */
		for (i = 0; i < n_caches; i++) {
			if (dev->cache[i].object == obj &&
			    dev->cache[i].dirty) {
				if (!cache ||
				    dev->cache[i].chunk_id < lowest) {
					cache = &dev->cache[i];
					lowest = cache->chunk_id;
				}
			}
		}

		if (cache && !cache->locked) {
			/* Write it out and free it up */
			chunk_written =
			    yaffs_wr_data_obj(cache->object,
					      cache->chunk_id,
					      cache->data,
					      cache->n_bytes, 1);
			cache->dirty = 0;
			cache->object = NULL;
		}
	} while (cache && chunk_written > 0);

	if (cache)
		/* Hoosterman, disk full while writing cache out. */
		yaffs_trace(YAFFS_TRACE_ERROR,
			"yaffs tragedy: no space during cache write");
}

/*
 * yaffs_flush_whole_cache(dev)
 */

void yaffs_flush_whole_cache(struct yaffs_dev *dev)
{
	struct yaffs_obj *obj;
	int n_caches = dev->param.n_caches;
	int i;

	/* Find a dirty object in the cache and flush it...
	 * until there are no further dirty objects.
	 */
	do {
		obj = NULL;
		for (i = 0; i < n_caches && !obj; i++) {
			if (dev->cache[i].object && dev->cache[i].dirty)
				obj = dev->cache[i].object;
		}
		if (obj)
			yaffs_flush_file_cache(obj);
	} while (obj);
}

/* Grab us a cache chunk for use.
 * First look for an empty one.
 * Then look for the least recently used non-dirty one.
 * Then look for the least recently used dirty one..., flush and look again.
 */
static struct yaffs_cache *yaffs_grab_chunk_worker(struct yaffs_dev *dev)
{
	int i;

	if (dev->param.n_caches > 0) {
		for (i = 0; i < dev->param.n_caches; i++) {
			if (!dev->cache[i].object)
				return &dev->cache[i];
		}
	}
	return NULL;
}
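/*
 * Note on the selection policy: yaffs_grab_chunk_cache() below first asks
 * yaffs_grab_chunk_worker() for an unused entry. If every entry is owned by
 * an object, it picks the unlocked entry with the smallest last_use value
 * (the least recently used one); if that entry is dirty, the owning object's
 * cache is flushed and the search is retried.
 */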
static struct yaffs_cache *yaffs_grab_chunk_cache(struct yaffs_dev *dev)
{
	struct yaffs_cache *cache;
	struct yaffs_obj *the_obj;
	int usage;
	int i;

	if (dev->param.n_caches < 1)
		return NULL;

	/* Try to find a non-dirty one... */

	cache = yaffs_grab_chunk_worker(dev);

	if (!cache) {
		/* They were all dirty, find the LRU object and flush
		 * its cache, then find again.
		 * NB what's here is not very accurate,
		 * we actually flush the object with the LRU chunk.
		 */

		/* With locking we can't assume we can use entry zero,
		 * Set the_obj to a valid pointer for Coverity. */
		the_obj = dev->cache[0].object;
		usage = -1;
		cache = NULL;

		for (i = 0; i < dev->param.n_caches; i++) {
			if (dev->cache[i].object &&
			    !dev->cache[i].locked &&
			    (dev->cache[i].last_use < usage ||
			     !cache)) {
				usage = dev->cache[i].last_use;
				the_obj = dev->cache[i].object;
				cache = &dev->cache[i];
			}
		}

		if (!cache || cache->dirty) {
			/* Flush and try again */
			yaffs_flush_file_cache(the_obj);
			cache = yaffs_grab_chunk_worker(dev);
		}
	}
	return cache;
}

/* Find a cached chunk */
static struct yaffs_cache *yaffs_find_chunk_cache(const struct yaffs_obj *obj,
						  int chunk_id)
{
	struct yaffs_dev *dev = obj->my_dev;
	int i;

	if (dev->param.n_caches < 1)
		return NULL;

	for (i = 0; i < dev->param.n_caches; i++) {
		if (dev->cache[i].object == obj &&
		    dev->cache[i].chunk_id == chunk_id) {
			dev->cache_hits++;

			return &dev->cache[i];
		}
	}
	return NULL;
}

/* Mark the chunk for the least recently used algorithm */
static void yaffs_use_cache(struct yaffs_dev *dev, struct yaffs_cache *cache,
			    int is_write)
{
	int i;

	if (dev->param.n_caches < 1)
		return;

	if (dev->cache_last_use < 0 ||
	    dev->cache_last_use > 100000000) {
		/* Reset the cache usages */
		for (i = 1; i < dev->param.n_caches; i++)
			dev->cache[i].last_use = 0;

		dev->cache_last_use = 0;
	}
	dev->cache_last_use++;
	cache->last_use = dev->cache_last_use;

	if (is_write)
		cache->dirty = 1;
}
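/*
 * Note: cache ageing uses dev->cache_last_use, a simple increasing counter.
 * yaffs_use_cache() resets the counter (and the stored last_use values) once
 * it grows very large, so the least-recently-used comparison never has to
 * deal with wrap-around.
 */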
/* Invalidate a single cache page.
 * Do this when a whole page gets written,
 * ie the short cache for this page is no longer valid.
 */
static void yaffs_invalidate_chunk_cache(struct yaffs_obj *object, int chunk_id)
{
	struct yaffs_cache *cache;

	if (object->my_dev->param.n_caches > 0) {
		cache = yaffs_find_chunk_cache(object, chunk_id);

		if (cache)
			cache->object = NULL;
	}
}

/* Invalidate all the cache pages associated with this object
 * Do this whenever the file is deleted or resized.
 */
static void yaffs_invalidate_whole_cache(struct yaffs_obj *in)
{
	int i;
	struct yaffs_dev *dev = in->my_dev;

	if (dev->param.n_caches > 0) {
		/* Invalidate it. */
		for (i = 0; i < dev->param.n_caches; i++) {
			if (dev->cache[i].object == in)
				dev->cache[i].object = NULL;
		}
	}
}

static void yaffs_unhash_obj(struct yaffs_obj *obj)
{
	int bucket;
	struct yaffs_dev *dev = obj->my_dev;

	/* If it is still linked into the bucket list, free from the list */
	if (!list_empty(&obj->hash_link)) {
		list_del_init(&obj->hash_link);
		bucket = yaffs_hash_fn(obj->obj_id);
		dev->obj_bucket[bucket].count--;
	}
}

/* FreeObject frees up an object and puts it back on the free list */
static void yaffs_free_obj(struct yaffs_obj *obj)
{
	struct yaffs_dev *dev;

	if (!obj) {
		BUG();
		return;
	}
	dev = obj->my_dev;
	yaffs_trace(YAFFS_TRACE_OS, "FreeObject %p inode %p",
		obj, obj->my_inode);
	if (obj->parent)
		BUG();
	if (!list_empty(&obj->siblings))
		BUG();

	if (obj->my_inode) {
		/* We're still hooked up to a cached inode.
		 * Don't delete now, but mark for later deletion
		 */
		obj->defered_free = 1;
		return;
	}

	yaffs_unhash_obj(obj);

	yaffs_free_raw_obj(dev, obj);
	dev->n_obj--;
	dev->checkpoint_blocks_required = 0;	/* force recalculation */
}

void yaffs_handle_defered_free(struct yaffs_obj *obj)
{
	if (obj->defered_free)
		yaffs_free_obj(obj);
}

static int yaffs_generic_obj_del(struct yaffs_obj *in)
{
	/* Invalidate the file's data in the cache, without flushing. */
	yaffs_invalidate_whole_cache(in);

	if (in->my_dev->param.is_yaffs2 && in->parent != in->my_dev->del_dir) {
		/* Move to unlinked directory so we have a deletion record */
		yaffs_change_obj_name(in, in->my_dev->del_dir, _Y("deleted"), 0,
				      0);
	}

	yaffs_remove_obj_from_dir(in);
	yaffs_chunk_del(in->my_dev, in->hdr_chunk, 1, __LINE__);
	in->hdr_chunk = 0;

	yaffs_free_obj(in);
	return YAFFS_OK;
}

static void yaffs_soft_del_file(struct yaffs_obj *obj)
{
	if (!obj->deleted ||
	    obj->variant_type != YAFFS_OBJECT_TYPE_FILE ||
	    obj->soft_del)
		return;

	if (obj->n_data_chunks <= 0) {
		/* Empty file with no duplicate object headers,
		 * just delete it immediately */
		yaffs_free_tnode(obj->my_dev, obj->variant.file_variant.top);
		obj->variant.file_variant.top = NULL;
		yaffs_trace(YAFFS_TRACE_TRACING,
			"yaffs: Deleting empty file %d",
			obj->obj_id);
		yaffs_generic_obj_del(obj);
	} else {
		yaffs_soft_del_worker(obj,
				      obj->variant.file_variant.top,
				      obj->variant.file_variant.top_level, 0);
		obj->soft_del = 1;
	}
}

/* Pruning removes any part of the file structure tree that is beyond the
 * bounds of the file (ie that does not point to chunks).
 *
 * A file should only get pruned when its size is reduced.
 *
 * Before pruning, the chunks must be pulled from the tree and the
 * level 0 tnode entries must be zeroed out.
 * Could also use this for file deletion, but that's probably better handled
 * by a special case.
 *
 * This function is recursive. For levels > 0 the function is called again on
 * any sub-tree. For level == 0 we just check if the sub-tree has data.
 * If there is no data in a subtree then it is pruned.
1714 */ 1715 1716 static struct yaffs_tnode *yaffs_prune_worker(struct yaffs_dev *dev, 1717 struct yaffs_tnode *tn, u32 level, 1718 int del0) 1719 { 1720 int i; 1721 int has_data; 1722 1723 if (!tn) 1724 return tn; 1725 1726 has_data = 0; 1727 1728 if (level > 0) { 1729 for (i = 0; i < YAFFS_NTNODES_INTERNAL; i++) { 1730 if (tn->internal[i]) { 1731 tn->internal[i] = 1732 yaffs_prune_worker(dev, 1733 tn->internal[i], 1734 level - 1, 1735 (i == 0) ? del0 : 1); 1736 } 1737 1738 if (tn->internal[i]) 1739 has_data++; 1740 } 1741 } else { 1742 int tnode_size_u32 = dev->tnode_size / sizeof(u32); 1743 u32 *map = (u32 *) tn; 1744 1745 for (i = 0; !has_data && i < tnode_size_u32; i++) { 1746 if (map[i]) 1747 has_data++; 1748 } 1749 } 1750 1751 if (has_data == 0 && del0) { 1752 /* Free and return NULL */ 1753 yaffs_free_tnode(dev, tn); 1754 tn = NULL; 1755 } 1756 return tn; 1757 } 1758 1759 static int yaffs_prune_tree(struct yaffs_dev *dev, 1760 struct yaffs_file_var *file_struct) 1761 { 1762 int i; 1763 int has_data; 1764 int done = 0; 1765 struct yaffs_tnode *tn; 1766 1767 if (file_struct->top_level < 1) 1768 return YAFFS_OK; 1769 1770 file_struct->top = 1771 yaffs_prune_worker(dev, file_struct->top, file_struct->top_level, 0); 1772 1773 /* Now we have a tree with all the non-zero branches NULL but 1774 * the height is the same as it was. 1775 * Let's see if we can trim internal tnodes to shorten the tree. 1776 * We can do this if only the 0th element in the tnode is in use 1777 * (ie all the non-zero are NULL) 1778 */ 1779 1780 while (file_struct->top_level && !done) { 1781 tn = file_struct->top; 1782 1783 has_data = 0; 1784 for (i = 1; i < YAFFS_NTNODES_INTERNAL; i++) { 1785 if (tn->internal[i]) 1786 has_data++; 1787 } 1788 1789 if (!has_data) { 1790 file_struct->top = tn->internal[0]; 1791 file_struct->top_level--; 1792 yaffs_free_tnode(dev, tn); 1793 } else { 1794 done = 1; 1795 } 1796 } 1797 1798 return YAFFS_OK; 1799 } 1800 1801 /*-------------------- End of File Structure functions.-------------------*/ 1802 1803 /* alloc_empty_obj gets us a clean Object.*/ 1804 static struct yaffs_obj *yaffs_alloc_empty_obj(struct yaffs_dev *dev) 1805 { 1806 struct yaffs_obj *obj = yaffs_alloc_raw_obj(dev); 1807 1808 if (!obj) 1809 return obj; 1810 1811 dev->n_obj++; 1812 1813 /* Now sweeten it up... */ 1814 1815 memset(obj, 0, sizeof(struct yaffs_obj)); 1816 obj->being_created = 1; 1817 1818 obj->my_dev = dev; 1819 obj->hdr_chunk = 0; 1820 obj->variant_type = YAFFS_OBJECT_TYPE_UNKNOWN; 1821 INIT_LIST_HEAD(&(obj->hard_links)); 1822 INIT_LIST_HEAD(&(obj->hash_link)); 1823 INIT_LIST_HEAD(&obj->siblings); 1824 1825 /* Now make the directory sane */ 1826 if (dev->root_dir) { 1827 obj->parent = dev->root_dir; 1828 list_add(&(obj->siblings), 1829 &dev->root_dir->variant.dir_variant.children); 1830 } 1831 1832 /* Add it to the lost and found directory. 1833 * NB Can't put root or lost-n-found in lost-n-found so 1834 * check if lost-n-found exists first 1835 */ 1836 if (dev->lost_n_found) 1837 yaffs_add_obj_to_dir(dev->lost_n_found, obj); 1838 1839 obj->being_created = 0; 1840 1841 dev->checkpoint_blocks_required = 0; /* force recalculation */ 1842 1843 return obj; 1844 } 1845 1846 static int yaffs_find_nice_bucket(struct yaffs_dev *dev) 1847 { 1848 int i; 1849 int l = 999; 1850 int lowest = 999999; 1851 1852 /* Search for the shortest list or one that 1853 * isn't too long. 
1854 */ 1855 1856 for (i = 0; i < 10 && lowest > 4; i++) { 1857 dev->bucket_finder++; 1858 dev->bucket_finder %= YAFFS_NOBJECT_BUCKETS; 1859 if (dev->obj_bucket[dev->bucket_finder].count < lowest) { 1860 lowest = dev->obj_bucket[dev->bucket_finder].count; 1861 l = dev->bucket_finder; 1862 } 1863 } 1864 1865 return l; 1866 } 1867 1868 static int yaffs_new_obj_id(struct yaffs_dev *dev) 1869 { 1870 int bucket = yaffs_find_nice_bucket(dev); 1871 int found = 0; 1872 struct list_head *i; 1873 u32 n = (u32) bucket; 1874 1875 /* Now find an object value that has not already been taken 1876 * by scanning the list. 1877 */ 1878 1879 while (!found) { 1880 found = 1; 1881 n += YAFFS_NOBJECT_BUCKETS; 1882 if (1 || dev->obj_bucket[bucket].count > 0) { 1883 list_for_each(i, &dev->obj_bucket[bucket].list) { 1884 /* If there is already one in the list */ 1885 if (i && list_entry(i, struct yaffs_obj, 1886 hash_link)->obj_id == n) { 1887 found = 0; 1888 } 1889 } 1890 } 1891 } 1892 return n; 1893 } 1894 1895 static void yaffs_hash_obj(struct yaffs_obj *in) 1896 { 1897 int bucket = yaffs_hash_fn(in->obj_id); 1898 struct yaffs_dev *dev = in->my_dev; 1899 1900 list_add(&in->hash_link, &dev->obj_bucket[bucket].list); 1901 dev->obj_bucket[bucket].count++; 1902 } 1903 1904 struct yaffs_obj *yaffs_find_by_number(struct yaffs_dev *dev, u32 number) 1905 { 1906 int bucket = yaffs_hash_fn(number); 1907 struct list_head *i; 1908 struct yaffs_obj *in; 1909 1910 list_for_each(i, &dev->obj_bucket[bucket].list) { 1911 /* Look if it is in the list */ 1912 in = list_entry(i, struct yaffs_obj, hash_link); 1913 if (in->obj_id == number) { 1914 /* Don't show if it is defered free */ 1915 if (in->defered_free) 1916 return NULL; 1917 return in; 1918 } 1919 } 1920 1921 return NULL; 1922 } 1923 1924 struct yaffs_obj *yaffs_new_obj(struct yaffs_dev *dev, int number, 1925 enum yaffs_obj_type type) 1926 { 1927 struct yaffs_obj *the_obj = NULL; 1928 struct yaffs_tnode *tn = NULL; 1929 1930 if (number < 0) 1931 number = yaffs_new_obj_id(dev); 1932 1933 if (type == YAFFS_OBJECT_TYPE_FILE) { 1934 tn = yaffs_get_tnode(dev); 1935 if (!tn) 1936 return NULL; 1937 } 1938 1939 the_obj = yaffs_alloc_empty_obj(dev); 1940 if (!the_obj) { 1941 if (tn) 1942 yaffs_free_tnode(dev, tn); 1943 return NULL; 1944 } 1945 1946 the_obj->fake = 0; 1947 the_obj->rename_allowed = 1; 1948 the_obj->unlink_allowed = 1; 1949 the_obj->obj_id = number; 1950 yaffs_hash_obj(the_obj); 1951 the_obj->variant_type = type; 1952 yaffs_load_current_time(the_obj, 1, 1); 1953 1954 switch (type) { 1955 case YAFFS_OBJECT_TYPE_FILE: 1956 the_obj->variant.file_variant.file_size = 0; 1957 the_obj->variant.file_variant.scanned_size = 0; 1958 the_obj->variant.file_variant.shrink_size = 1959 yaffs_max_file_size(dev); 1960 the_obj->variant.file_variant.top_level = 0; 1961 the_obj->variant.file_variant.top = tn; 1962 break; 1963 case YAFFS_OBJECT_TYPE_DIRECTORY: 1964 INIT_LIST_HEAD(&the_obj->variant.dir_variant.children); 1965 INIT_LIST_HEAD(&the_obj->variant.dir_variant.dirty); 1966 break; 1967 case YAFFS_OBJECT_TYPE_SYMLINK: 1968 case YAFFS_OBJECT_TYPE_HARDLINK: 1969 case YAFFS_OBJECT_TYPE_SPECIAL: 1970 /* No action required */ 1971 break; 1972 case YAFFS_OBJECT_TYPE_UNKNOWN: 1973 /* todo this should not happen */ 1974 break; 1975 } 1976 return the_obj; 1977 } 1978 1979 static struct yaffs_obj *yaffs_create_fake_dir(struct yaffs_dev *dev, 1980 int number, u32 mode) 1981 { 1982 1983 struct yaffs_obj *obj = 1984 yaffs_new_obj(dev, number, YAFFS_OBJECT_TYPE_DIRECTORY); 1985 1986 if (!obj) 
		return NULL;

	obj->fake = 1;	/* it is fake so it might not use NAND */
	obj->rename_allowed = 0;
	obj->unlink_allowed = 0;
	obj->deleted = 0;
	obj->unlinked = 0;
	obj->yst_mode = mode;
	obj->my_dev = dev;
	obj->hdr_chunk = 0;	/* Not a valid chunk. */
	return obj;
}

static void yaffs_init_tnodes_and_objs(struct yaffs_dev *dev)
{
	int i;

	dev->n_obj = 0;
	dev->n_tnodes = 0;
	yaffs_init_raw_tnodes_and_objs(dev);

	for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) {
		INIT_LIST_HEAD(&dev->obj_bucket[i].list);
		dev->obj_bucket[i].count = 0;
	}
}

struct yaffs_obj *yaffs_find_or_create_by_number(struct yaffs_dev *dev,
						 int number,
						 enum yaffs_obj_type type)
{
	struct yaffs_obj *the_obj = NULL;

	if (number > 0)
		the_obj = yaffs_find_by_number(dev, number);

	if (!the_obj)
		the_obj = yaffs_new_obj(dev, number, type);

	return the_obj;
}

YCHAR *yaffs_clone_str(const YCHAR *str)
{
	YCHAR *new_str = NULL;
	int len;

	if (!str)
		str = _Y("");

	len = yaffs_strnlen(str, YAFFS_MAX_ALIAS_LENGTH);
	new_str = kmalloc((len + 1) * sizeof(YCHAR), GFP_NOFS);
	if (new_str) {
		yaffs_strncpy(new_str, str, len);
		new_str[len] = 0;
	}
	return new_str;
}

/*
 * yaffs_update_parent() handles fixing a directory's mtime and ctime when a
 * new link (ie. name) is created or deleted in the directory.
 *
 * ie.
 * create dir/a : update dir's mtime/ctime
 * rm dir/a: update dir's mtime/ctime
 * modify dir/a: don't update dir's mtime/ctime
 *
 * This can be handled immediately or deferred. Deferring helps reduce the
 * number of updates when many files in a directory are changed within a
 * brief period.
 *
 * If the directory updating is deferred then yaffs_update_dirty_dirs must be
 * called periodically.
 */

static void yaffs_update_parent(struct yaffs_obj *obj)
{
	struct yaffs_dev *dev;

	if (!obj)
		return;
	dev = obj->my_dev;
	obj->dirty = 1;
	yaffs_load_current_time(obj, 0, 1);
	if (dev->param.defered_dir_update) {
		struct list_head *link = &obj->variant.dir_variant.dirty;

		if (list_empty(link)) {
			list_add(link, &dev->dirty_dirs);
			yaffs_trace(YAFFS_TRACE_BACKGROUND,
				"Added object %d to dirty directories",
				obj->obj_id);
		}

	} else {
		yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
	}
}

void yaffs_update_dirty_dirs(struct yaffs_dev *dev)
{
	struct list_head *link;
	struct yaffs_obj *obj;
	struct yaffs_dir_var *d_s;
	union yaffs_obj_var *o_v;

	yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update dirty directories");

	while (!list_empty(&dev->dirty_dirs)) {
		link = dev->dirty_dirs.next;
		list_del_init(link);

		d_s = list_entry(link, struct yaffs_dir_var, dirty);
		o_v = list_entry(d_s, union yaffs_obj_var, dir_variant);
		obj = list_entry(o_v, struct yaffs_obj, variant);

		yaffs_trace(YAFFS_TRACE_BACKGROUND, "Update directory %d",
			obj->obj_id);

		if (obj->dirty)
			yaffs_update_oh(obj, NULL, 0, 0, 0, NULL);
	}
}

/*
 * Mknod (create) a new object.
 * equiv_obj only has meaning for a hard link;
 * alias_str only has meaning for a symlink.
2118 * rdev only has meaning for devices (a subset of special objects) 2119 */ 2120 2121 static struct yaffs_obj *yaffs_create_obj(enum yaffs_obj_type type, 2122 struct yaffs_obj *parent, 2123 const YCHAR *name, 2124 u32 mode, 2125 u32 uid, 2126 u32 gid, 2127 struct yaffs_obj *equiv_obj, 2128 const YCHAR *alias_str, u32 rdev) 2129 { 2130 struct yaffs_obj *in; 2131 YCHAR *str = NULL; 2132 struct yaffs_dev *dev = parent->my_dev; 2133 2134 /* Check if the entry exists. 2135 * If it does then fail the call since we don't want a dup. */ 2136 if (yaffs_find_by_name(parent, name)) 2137 return NULL; 2138 2139 if (type == YAFFS_OBJECT_TYPE_SYMLINK) { 2140 str = yaffs_clone_str(alias_str); 2141 if (!str) 2142 return NULL; 2143 } 2144 2145 in = yaffs_new_obj(dev, -1, type); 2146 2147 if (!in) { 2148 kfree(str); 2149 return NULL; 2150 } 2151 2152 in->hdr_chunk = 0; 2153 in->valid = 1; 2154 in->variant_type = type; 2155 2156 in->yst_mode = mode; 2157 2158 yaffs_attribs_init(in, gid, uid, rdev); 2159 2160 in->n_data_chunks = 0; 2161 2162 yaffs_set_obj_name(in, name); 2163 in->dirty = 1; 2164 2165 yaffs_add_obj_to_dir(parent, in); 2166 2167 in->my_dev = parent->my_dev; 2168 2169 switch (type) { 2170 case YAFFS_OBJECT_TYPE_SYMLINK: 2171 in->variant.symlink_variant.alias = str; 2172 break; 2173 case YAFFS_OBJECT_TYPE_HARDLINK: 2174 in->variant.hardlink_variant.equiv_obj = equiv_obj; 2175 in->variant.hardlink_variant.equiv_id = equiv_obj->obj_id; 2176 list_add(&in->hard_links, &equiv_obj->hard_links); 2177 break; 2178 case YAFFS_OBJECT_TYPE_FILE: 2179 case YAFFS_OBJECT_TYPE_DIRECTORY: 2180 case YAFFS_OBJECT_TYPE_SPECIAL: 2181 case YAFFS_OBJECT_TYPE_UNKNOWN: 2182 /* do nothing */ 2183 break; 2184 } 2185 2186 if (yaffs_update_oh(in, name, 0, 0, 0, NULL) < 0) { 2187 /* Could not create the object header, fail */ 2188 yaffs_del_obj(in); 2189 in = NULL; 2190 } 2191 2192 if (in) 2193 yaffs_update_parent(parent); 2194 2195 return in; 2196 } 2197 2198 struct yaffs_obj *yaffs_create_file(struct yaffs_obj *parent, 2199 const YCHAR *name, u32 mode, u32 uid, 2200 u32 gid) 2201 { 2202 return yaffs_create_obj(YAFFS_OBJECT_TYPE_FILE, parent, name, mode, 2203 uid, gid, NULL, NULL, 0); 2204 } 2205 2206 struct yaffs_obj *yaffs_create_dir(struct yaffs_obj *parent, const YCHAR *name, 2207 u32 mode, u32 uid, u32 gid) 2208 { 2209 return yaffs_create_obj(YAFFS_OBJECT_TYPE_DIRECTORY, parent, name, 2210 mode, uid, gid, NULL, NULL, 0); 2211 } 2212 2213 struct yaffs_obj *yaffs_create_special(struct yaffs_obj *parent, 2214 const YCHAR *name, u32 mode, u32 uid, 2215 u32 gid, u32 rdev) 2216 { 2217 return yaffs_create_obj(YAFFS_OBJECT_TYPE_SPECIAL, parent, name, mode, 2218 uid, gid, NULL, NULL, rdev); 2219 } 2220 2221 struct yaffs_obj *yaffs_create_symlink(struct yaffs_obj *parent, 2222 const YCHAR *name, u32 mode, u32 uid, 2223 u32 gid, const YCHAR *alias) 2224 { 2225 return yaffs_create_obj(YAFFS_OBJECT_TYPE_SYMLINK, parent, name, mode, 2226 uid, gid, NULL, alias, 0); 2227 } 2228 2229 /* yaffs_link_obj returns the object id of the equivalent object.*/ 2230 struct yaffs_obj *yaffs_link_obj(struct yaffs_obj *parent, const YCHAR * name, 2231 struct yaffs_obj *equiv_obj) 2232 { 2233 /* Get the real object in case we were fed a hard link obj */ 2234 equiv_obj = yaffs_get_equivalent_obj(equiv_obj); 2235 2236 if (yaffs_create_obj(YAFFS_OBJECT_TYPE_HARDLINK, 2237 parent, name, 0, 0, 0, 2238 equiv_obj, NULL, 0)) 2239 return equiv_obj; 2240 2241 return NULL; 2242 2243 } 2244 2245 2246 2247 /*---------------------- Block Management and Page 
Allocation -------------*/ 2248 2249 static void yaffs_deinit_blocks(struct yaffs_dev *dev) 2250 { 2251 if (dev->block_info_alt && dev->block_info) 2252 vfree(dev->block_info); 2253 else 2254 kfree(dev->block_info); 2255 2256 dev->block_info_alt = 0; 2257 2258 dev->block_info = NULL; 2259 2260 if (dev->chunk_bits_alt && dev->chunk_bits) 2261 vfree(dev->chunk_bits); 2262 else 2263 kfree(dev->chunk_bits); 2264 dev->chunk_bits_alt = 0; 2265 dev->chunk_bits = NULL; 2266 } 2267 2268 static int yaffs_init_blocks(struct yaffs_dev *dev) 2269 { 2270 int n_blocks = dev->internal_end_block - dev->internal_start_block + 1; 2271 2272 dev->block_info = NULL; 2273 dev->chunk_bits = NULL; 2274 dev->alloc_block = -1; /* force it to get a new one */ 2275 2276 /* If the first allocation strategy fails, try the alternate one */ 2277 dev->block_info = 2278 kmalloc(n_blocks * sizeof(struct yaffs_block_info), GFP_NOFS); 2279 if (!dev->block_info) { 2280 dev->block_info = 2281 vmalloc(n_blocks * sizeof(struct yaffs_block_info)); 2282 dev->block_info_alt = 1; 2283 } else { 2284 dev->block_info_alt = 0; 2285 } 2286 2287 if (!dev->block_info) 2288 goto alloc_error; 2289 2290 /* Set up dynamic blockinfo stuff. Round up bytes. */ 2291 dev->chunk_bit_stride = (dev->param.chunks_per_block + 7) / 8; 2292 dev->chunk_bits = 2293 kmalloc(dev->chunk_bit_stride * n_blocks, GFP_NOFS); 2294 if (!dev->chunk_bits) { 2295 dev->chunk_bits = 2296 vmalloc(dev->chunk_bit_stride * n_blocks); 2297 dev->chunk_bits_alt = 1; 2298 } else { 2299 dev->chunk_bits_alt = 0; 2300 } 2301 if (!dev->chunk_bits) 2302 goto alloc_error; 2303 2304 2305 memset(dev->block_info, 0, n_blocks * sizeof(struct yaffs_block_info)); 2306 memset(dev->chunk_bits, 0, dev->chunk_bit_stride * n_blocks); 2307 return YAFFS_OK; 2308 2309 alloc_error: 2310 yaffs_deinit_blocks(dev); 2311 return YAFFS_FAIL; 2312 } 2313 2314 2315 void yaffs_block_became_dirty(struct yaffs_dev *dev, int block_no) 2316 { 2317 struct yaffs_block_info *bi = yaffs_get_block_info(dev, block_no); 2318 int erased_ok = 0; 2319 int i; 2320 2321 /* If the block is still healthy, erase it and mark it as clean. 2322 * If the block has had a data failure, then retire it. 2323 */ 2324 2325 yaffs_trace(YAFFS_TRACE_GC | YAFFS_TRACE_ERASE, 2326 "yaffs_block_became_dirty block %d state %d %s", 2327 block_no, bi->block_state, 2328 (bi->needs_retiring) ? 
"needs retiring" : ""); 2329 2330 yaffs2_clear_oldest_dirty_seq(dev, bi); 2331 2332 bi->block_state = YAFFS_BLOCK_STATE_DIRTY; 2333 2334 /* If this is the block being garbage collected then stop gc'ing */ 2335 if (block_no == dev->gc_block) 2336 dev->gc_block = 0; 2337 2338 /* If this block is currently the best candidate for gc 2339 * then drop as a candidate */ 2340 if (block_no == dev->gc_dirtiest) { 2341 dev->gc_dirtiest = 0; 2342 dev->gc_pages_in_use = 0; 2343 } 2344 2345 if (!bi->needs_retiring) { 2346 yaffs2_checkpt_invalidate(dev); 2347 erased_ok = yaffs_erase_block(dev, block_no); 2348 if (!erased_ok) { 2349 dev->n_erase_failures++; 2350 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, 2351 "**>> Erasure failed %d", block_no); 2352 } 2353 } 2354 2355 /* Verify erasure if needed */ 2356 if (erased_ok && 2357 ((yaffs_trace_mask & YAFFS_TRACE_ERASE) || 2358 !yaffs_skip_verification(dev))) { 2359 for (i = 0; i < dev->param.chunks_per_block; i++) { 2360 if (!yaffs_check_chunk_erased(dev, 2361 block_no * dev->param.chunks_per_block + i)) { 2362 yaffs_trace(YAFFS_TRACE_ERROR, 2363 ">>Block %d erasure supposedly OK, but chunk %d not erased", 2364 block_no, i); 2365 } 2366 } 2367 } 2368 2369 if (!erased_ok) { 2370 /* We lost a block of free space */ 2371 dev->n_free_chunks -= dev->param.chunks_per_block; 2372 yaffs_retire_block(dev, block_no); 2373 yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS, 2374 "**>> Block %d retired", block_no); 2375 return; 2376 } 2377 2378 /* Clean it up... */ 2379 bi->block_state = YAFFS_BLOCK_STATE_EMPTY; 2380 bi->seq_number = 0; 2381 dev->n_erased_blocks++; 2382 bi->pages_in_use = 0; 2383 bi->soft_del_pages = 0; 2384 bi->has_shrink_hdr = 0; 2385 bi->skip_erased_check = 1; /* Clean, so no need to check */ 2386 bi->gc_prioritise = 0; 2387 bi->has_summary = 0; 2388 2389 yaffs_clear_chunk_bits(dev, block_no); 2390 2391 yaffs_trace(YAFFS_TRACE_ERASE, "Erased block %d", block_no); 2392 } 2393 2394 static inline int yaffs_gc_process_chunk(struct yaffs_dev *dev, 2395 struct yaffs_block_info *bi, 2396 int old_chunk, u8 *buffer) 2397 { 2398 int new_chunk; 2399 int mark_flash = 1; 2400 struct yaffs_ext_tags tags; 2401 struct yaffs_obj *object; 2402 int matching_chunk; 2403 int ret_val = YAFFS_OK; 2404 2405 memset(&tags, 0, sizeof(tags)); 2406 yaffs_rd_chunk_tags_nand(dev, old_chunk, 2407 buffer, &tags); 2408 object = yaffs_find_by_number(dev, tags.obj_id); 2409 2410 yaffs_trace(YAFFS_TRACE_GC_DETAIL, 2411 "Collecting chunk in block %d, %d %d %d ", 2412 dev->gc_chunk, tags.obj_id, 2413 tags.chunk_id, tags.n_bytes); 2414 2415 if (object && !yaffs_skip_verification(dev)) { 2416 if (tags.chunk_id == 0) 2417 matching_chunk = 2418 object->hdr_chunk; 2419 else if (object->soft_del) 2420 /* Defeat the test */ 2421 matching_chunk = old_chunk; 2422 else 2423 matching_chunk = 2424 yaffs_find_chunk_in_file 2425 (object, tags.chunk_id, 2426 NULL); 2427 2428 if (old_chunk != matching_chunk) 2429 yaffs_trace(YAFFS_TRACE_ERROR, 2430 "gc: page in gc mismatch: %d %d %d %d", 2431 old_chunk, 2432 matching_chunk, 2433 tags.obj_id, 2434 tags.chunk_id); 2435 } 2436 2437 if (!object) { 2438 yaffs_trace(YAFFS_TRACE_ERROR, 2439 "page %d in gc has no object: %d %d %d ", 2440 old_chunk, 2441 tags.obj_id, tags.chunk_id, 2442 tags.n_bytes); 2443 } 2444 2445 if (object && 2446 object->deleted && 2447 object->soft_del && tags.chunk_id != 0) { 2448 /* Data chunk in a soft deleted file, 2449 * throw it away. 
2450 * It's a soft deleted data chunk, 2451 * No need to copy this, just forget 2452 * about it and fix up the object. 2453 */ 2454 2455 /* The free chunk count already includes 2456 * soft-deleted chunks; however, this 2457 * chunk will soon be really 2458 * deleted, which will increment free 2459 * chunks. We have to decrement free 2460 * chunks so this works out properly. 2461 */ 2462 dev->n_free_chunks--; 2463 bi->soft_del_pages--; 2464 2465 object->n_data_chunks--; 2466 if (object->n_data_chunks <= 0) { 2467 /* remember to clean up obj */ 2468 dev->gc_cleanup_list[dev->n_clean_ups] = tags.obj_id; 2469 dev->n_clean_ups++; 2470 } 2471 mark_flash = 0; 2472 } else if (object) { 2473 /* It's either a data chunk in a live 2474 * file or an ObjectHeader, so we're 2475 * interested in it. 2476 * NB Need to keep the ObjectHeaders of 2477 * deleted files until the whole file 2478 * has been deleted. 2479 */ 2480 tags.serial_number++; 2481 dev->n_gc_copies++; 2482 2483 if (tags.chunk_id == 0) { 2484 /* It is an object header, so 2485 * we need to nuke the 2486 * shrink-header flags since their 2487 * work is done. 2488 * Also need to clean up 2489 * shadowing. 2490 */ 2491 struct yaffs_obj_hdr *oh; 2492 oh = (struct yaffs_obj_hdr *) buffer; 2493 2494 oh->is_shrink = 0; 2495 tags.extra_is_shrink = 0; 2496 oh->shadows_obj = 0; 2497 oh->inband_shadowed_obj_id = 0; 2498 tags.extra_shadows = 0; 2499 2500 /* Update file size */ 2501 if (object->variant_type == YAFFS_OBJECT_TYPE_FILE) { 2502 yaffs_oh_size_load(oh, 2503 object->variant.file_variant.file_size); 2504 tags.extra_file_size = 2505 object->variant.file_variant.file_size; 2506 } 2507 2508 yaffs_verify_oh(object, oh, &tags, 1); 2509 new_chunk = 2510 yaffs_write_new_chunk(dev, (u8 *) oh, &tags, 1); 2511 } else { 2512 new_chunk = 2513 yaffs_write_new_chunk(dev, buffer, &tags, 1); 2514 } 2515 2516 if (new_chunk < 0) { 2517 ret_val = YAFFS_FAIL; 2518 } else { 2519 2520 /* Now fix up the Tnodes etc. 
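 * For a header chunk that means pointing obj->hdr_chunk (and the serial)
 * at the new location; for a data chunk the new location is patched back
 * into the file's tnode tree via yaffs_put_chunk_in_file().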
*/ 2521 2522 if (tags.chunk_id == 0) { 2523 /* It's a header */ 2524 object->hdr_chunk = new_chunk; 2525 object->serial = tags.serial_number; 2526 } else { 2527 /* It's a data chunk */ 2528 yaffs_put_chunk_in_file(object, tags.chunk_id, 2529 new_chunk, 0); 2530 } 2531 } 2532 } 2533 if (ret_val == YAFFS_OK) 2534 yaffs_chunk_del(dev, old_chunk, mark_flash, __LINE__); 2535 return ret_val; 2536 } 2537 2538 static int yaffs_gc_block(struct yaffs_dev *dev, int block, int whole_block) 2539 { 2540 int old_chunk; 2541 int ret_val = YAFFS_OK; 2542 int i; 2543 int is_checkpt_block; 2544 int max_copies; 2545 int chunks_before = yaffs_get_erased_chunks(dev); 2546 int chunks_after; 2547 struct yaffs_block_info *bi = yaffs_get_block_info(dev, block); 2548 2549 is_checkpt_block = (bi->block_state == YAFFS_BLOCK_STATE_CHECKPOINT); 2550 2551 yaffs_trace(YAFFS_TRACE_TRACING, 2552 "Collecting block %d, in use %d, shrink %d, whole_block %d", 2553 block, bi->pages_in_use, bi->has_shrink_hdr, 2554 whole_block); 2555 2556 /*yaffs_verify_free_chunks(dev); */ 2557 2558 if (bi->block_state == YAFFS_BLOCK_STATE_FULL) 2559 bi->block_state = YAFFS_BLOCK_STATE_COLLECTING; 2560 2561 bi->has_shrink_hdr = 0; /* clear the flag so that the block can erase */ 2562 2563 dev->gc_disable = 1; 2564 2565 yaffs_summary_gc(dev, block); 2566 2567 if (is_checkpt_block || !yaffs_still_some_chunks(dev, block)) { 2568 yaffs_trace(YAFFS_TRACE_TRACING, 2569 "Collecting block %d that has no chunks in use", 2570 block); 2571 yaffs_block_became_dirty(dev, block); 2572 } else { 2573 2574 u8 *buffer = yaffs_get_temp_buffer(dev); 2575 2576 yaffs_verify_blk(dev, bi, block); 2577 2578 max_copies = (whole_block) ? dev->param.chunks_per_block : 5; 2579 old_chunk = block * dev->param.chunks_per_block + dev->gc_chunk; 2580 2581 for (/* init already done */ ; 2582 ret_val == YAFFS_OK && 2583 dev->gc_chunk < dev->param.chunks_per_block && 2584 (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) && 2585 max_copies > 0; 2586 dev->gc_chunk++, old_chunk++) { 2587 if (yaffs_check_chunk_bit(dev, block, dev->gc_chunk)) { 2588 /* Page is in use and might need to be copied */ 2589 max_copies--; 2590 ret_val = yaffs_gc_process_chunk(dev, bi, 2591 old_chunk, buffer); 2592 } 2593 } 2594 yaffs_release_temp_buffer(dev, buffer); 2595 } 2596 2597 yaffs_verify_collected_blk(dev, bi, block); 2598 2599 if (bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) { 2600 /* 2601 * The gc did not complete. Set block state back to FULL 2602 * because checkpointing does not restore gc. 2603 */ 2604 bi->block_state = YAFFS_BLOCK_STATE_FULL; 2605 } else { 2606 /* The gc completed. 
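 * Objects queued on gc_cleanup_list (files whose last data chunks were
 * soft-deleted during this pass) can now be deleted for real.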
*/ 2607 /* Do any required cleanups */ 2608 for (i = 0; i < dev->n_clean_ups; i++) { 2609 /* Time to delete the file too */ 2610 struct yaffs_obj *object = 2611 yaffs_find_by_number(dev, dev->gc_cleanup_list[i]); 2612 if (object) { 2613 yaffs_free_tnode(dev, 2614 object->variant.file_variant.top); 2615 object->variant.file_variant.top = NULL; 2616 yaffs_trace(YAFFS_TRACE_GC, 2617 "yaffs: About to finally delete object %d", 2618 object->obj_id); 2619 yaffs_generic_obj_del(object); 2620 object->my_dev->n_deleted_files--; 2621 } 2622 2623 } 2624 chunks_after = yaffs_get_erased_chunks(dev); 2625 if (chunks_before >= chunks_after) 2626 yaffs_trace(YAFFS_TRACE_GC, 2627 "gc did not increase free chunks before %d after %d", 2628 chunks_before, chunks_after); 2629 dev->gc_block = 0; 2630 dev->gc_chunk = 0; 2631 dev->n_clean_ups = 0; 2632 } 2633 2634 dev->gc_disable = 0; 2635 2636 return ret_val; 2637 } 2638 2639 /* 2640 * find_gc_block() selects the dirtiest block (or close enough) 2641 * for garbage collection. 2642 */ 2643 2644 static unsigned yaffs_find_gc_block(struct yaffs_dev *dev, 2645 int aggressive, int background) 2646 { 2647 int i; 2648 int iterations; 2649 unsigned selected = 0; 2650 int prioritised = 0; 2651 int prioritised_exist = 0; 2652 struct yaffs_block_info *bi; 2653 int threshold; 2654 2655 /* First let's see if we need to grab a prioritised block */ 2656 if (dev->has_pending_prioritised_gc && !aggressive) { 2657 dev->gc_dirtiest = 0; 2658 bi = dev->block_info; 2659 for (i = dev->internal_start_block; 2660 i <= dev->internal_end_block && !selected; i++) { 2661 2662 if (bi->gc_prioritise) { 2663 prioritised_exist = 1; 2664 if (bi->block_state == YAFFS_BLOCK_STATE_FULL && 2665 yaffs_block_ok_for_gc(dev, bi)) { 2666 selected = i; 2667 prioritised = 1; 2668 } 2669 } 2670 bi++; 2671 } 2672 2673 /* 2674 * If there is a prioritised block and none was selected then 2675 * this happened because there is at least one old dirty block 2676 * gumming up the works. Let's gc the oldest dirty block. 2677 */ 2678 2679 if (prioritised_exist && 2680 !selected && dev->oldest_dirty_block > 0) 2681 selected = dev->oldest_dirty_block; 2682 2683 if (!prioritised_exist) /* None found, so we can clear this */ 2684 dev->has_pending_prioritised_gc = 0; 2685 } 2686 2687 /* If we're doing aggressive GC then we are happy to take a less-dirty 2688 * block, and search harder. 2689 * else (leasurely gc), then we only bother to do this if the 2690 * block has only a few pages in use. 2691 */ 2692 2693 if (!selected) { 2694 int pages_used; 2695 int n_blocks = 2696 dev->internal_end_block - dev->internal_start_block + 1; 2697 if (aggressive) { 2698 threshold = dev->param.chunks_per_block; 2699 iterations = n_blocks; 2700 } else { 2701 int max_threshold; 2702 2703 if (background) 2704 max_threshold = dev->param.chunks_per_block / 2; 2705 else 2706 max_threshold = dev->param.chunks_per_block / 8; 2707 2708 if (max_threshold < YAFFS_GC_PASSIVE_THRESHOLD) 2709 max_threshold = YAFFS_GC_PASSIVE_THRESHOLD; 2710 2711 threshold = background ? 
(dev->gc_not_done + 2) * 2 : 0; 2712 if (threshold < YAFFS_GC_PASSIVE_THRESHOLD) 2713 threshold = YAFFS_GC_PASSIVE_THRESHOLD; 2714 if (threshold > max_threshold) 2715 threshold = max_threshold; 2716 2717 iterations = n_blocks / 16 + 1; 2718 if (iterations > 100) 2719 iterations = 100; 2720 } 2721 2722 for (i = 0; 2723 i < iterations && 2724 (dev->gc_dirtiest < 1 || 2725 dev->gc_pages_in_use > YAFFS_GC_GOOD_ENOUGH); 2726 i++) { 2727 dev->gc_block_finder++; 2728 if (dev->gc_block_finder < dev->internal_start_block || 2729 dev->gc_block_finder > dev->internal_end_block) 2730 dev->gc_block_finder = 2731 dev->internal_start_block; 2732 2733 bi = yaffs_get_block_info(dev, dev->gc_block_finder); 2734 2735 pages_used = bi->pages_in_use - bi->soft_del_pages; 2736 2737 if (bi->block_state == YAFFS_BLOCK_STATE_FULL && 2738 pages_used < dev->param.chunks_per_block && 2739 (dev->gc_dirtiest < 1 || 2740 pages_used < dev->gc_pages_in_use) && 2741 yaffs_block_ok_for_gc(dev, bi)) { 2742 dev->gc_dirtiest = dev->gc_block_finder; 2743 dev->gc_pages_in_use = pages_used; 2744 } 2745 } 2746 2747 if (dev->gc_dirtiest > 0 && dev->gc_pages_in_use <= threshold) 2748 selected = dev->gc_dirtiest; 2749 } 2750 2751 /* 2752 * If nothing has been selected for a while, try the oldest dirty 2753 * because that's gumming up the works. 2754 */ 2755 2756 if (!selected && dev->param.is_yaffs2 && 2757 dev->gc_not_done >= (background ? 10 : 20)) { 2758 yaffs2_find_oldest_dirty_seq(dev); 2759 if (dev->oldest_dirty_block > 0) { 2760 selected = dev->oldest_dirty_block; 2761 dev->gc_dirtiest = selected; 2762 dev->oldest_dirty_gc_count++; 2763 bi = yaffs_get_block_info(dev, selected); 2764 dev->gc_pages_in_use = 2765 bi->pages_in_use - bi->soft_del_pages; 2766 } else { 2767 dev->gc_not_done = 0; 2768 } 2769 } 2770 2771 if (selected) { 2772 yaffs_trace(YAFFS_TRACE_GC, 2773 "GC Selected block %d with %d free, prioritised:%d", 2774 selected, 2775 dev->param.chunks_per_block - dev->gc_pages_in_use, 2776 prioritised); 2777 2778 dev->n_gc_blocks++; 2779 if (background) 2780 dev->bg_gcs++; 2781 2782 dev->gc_dirtiest = 0; 2783 dev->gc_pages_in_use = 0; 2784 dev->gc_not_done = 0; 2785 if (dev->refresh_skip > 0) 2786 dev->refresh_skip--; 2787 } else { 2788 dev->gc_not_done++; 2789 yaffs_trace(YAFFS_TRACE_GC, 2790 "GC none: finder %d skip %d threshold %d dirtiest %d using %d oldest %d%s", 2791 dev->gc_block_finder, dev->gc_not_done, threshold, 2792 dev->gc_dirtiest, dev->gc_pages_in_use, 2793 dev->oldest_dirty_block, background ? " bg" : ""); 2794 } 2795 2796 return selected; 2797 } 2798 2799 /* New garbage collector 2800 * If we're very low on erased blocks then we do aggressive garbage collection 2801 * otherwise we do "leasurely" garbage collection. 2802 * Aggressive gc looks further (whole array) and will accept less dirty blocks. 2803 * Passive gc only inspects smaller areas and only accepts more dirty blocks. 2804 * 2805 * The idea is to help clear out space in a more spread-out manner. 2806 * Dunno if it really does anything useful. 2807 */ 2808 static int yaffs_check_gc(struct yaffs_dev *dev, int background) 2809 { 2810 int aggressive = 0; 2811 int gc_ok = YAFFS_OK; 2812 int max_tries = 0; 2813 int min_erased; 2814 int erased_chunks; 2815 int checkpt_block_adjust; 2816 2817 if (dev->param.gc_control && (dev->param.gc_control(dev) & 1) == 0) 2818 return YAFFS_OK; 2819 2820 if (dev->gc_disable) 2821 /* Bail out so we don't get recursive gc */ 2822 return YAFFS_OK; 2823 2824 /* This loop should pass the first time. 
2825 * Only loops here if the collection does not increase space. 2826 */ 2827 2828 do { 2829 max_tries++; 2830 2831 checkpt_block_adjust = yaffs_calc_checkpt_blocks_required(dev); 2832 2833 min_erased = 2834 dev->param.n_reserved_blocks + checkpt_block_adjust + 1; 2835 erased_chunks = 2836 dev->n_erased_blocks * dev->param.chunks_per_block; 2837 2838 /* If we need a block soon then do aggressive gc. */ 2839 if (dev->n_erased_blocks < min_erased) 2840 aggressive = 1; 2841 else { 2842 if (!background 2843 && erased_chunks > (dev->n_free_chunks / 4)) 2844 break; 2845 2846 if (dev->gc_skip > 20) 2847 dev->gc_skip = 20; 2848 if (erased_chunks < dev->n_free_chunks / 2 || 2849 dev->gc_skip < 1 || background) 2850 aggressive = 0; 2851 else { 2852 dev->gc_skip--; 2853 break; 2854 } 2855 } 2856 2857 dev->gc_skip = 5; 2858 2859 /* If we don't already have a block being gc'd then see if we 2860 * should start another */ 2861 2862 if (dev->gc_block < 1 && !aggressive) { 2863 dev->gc_block = yaffs2_find_refresh_block(dev); 2864 dev->gc_chunk = 0; 2865 dev->n_clean_ups = 0; 2866 } 2867 if (dev->gc_block < 1) { 2868 dev->gc_block = 2869 yaffs_find_gc_block(dev, aggressive, background); 2870 dev->gc_chunk = 0; 2871 dev->n_clean_ups = 0; 2872 } 2873 2874 if (dev->gc_block > 0) { 2875 dev->all_gcs++; 2876 if (!aggressive) 2877 dev->passive_gc_count++; 2878 2879 yaffs_trace(YAFFS_TRACE_GC, 2880 "yaffs: GC n_erased_blocks %d aggressive %d", 2881 dev->n_erased_blocks, aggressive); 2882 2883 gc_ok = yaffs_gc_block(dev, dev->gc_block, aggressive); 2884 } 2885 2886 if (dev->n_erased_blocks < (dev->param.n_reserved_blocks) && 2887 dev->gc_block > 0) { 2888 yaffs_trace(YAFFS_TRACE_GC, 2889 "yaffs: GC !!!no reclaim!!! n_erased_blocks %d after try %d block %d", 2890 dev->n_erased_blocks, max_tries, 2891 dev->gc_block); 2892 } 2893 } while ((dev->n_erased_blocks < dev->param.n_reserved_blocks) && 2894 (dev->gc_block > 0) && (max_tries < 2)); 2895 2896 return aggressive ? gc_ok : YAFFS_OK; 2897 } 2898 2899 /* 2900 * yaffs_bg_gc() 2901 * Garbage collects. Intended to be called from a background thread. 2902 * Returns non-zero if at least half the free chunks are erased. 
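 * Note that the urgency argument is currently only used for tracing; the
 * actual collection decisions are made inside yaffs_check_gc().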
2903 */ 2904 int yaffs_bg_gc(struct yaffs_dev *dev, unsigned urgency) 2905 { 2906 int erased_chunks = dev->n_erased_blocks * dev->param.chunks_per_block; 2907 2908 yaffs_trace(YAFFS_TRACE_BACKGROUND, "Background gc %u", urgency); 2909 2910 yaffs_check_gc(dev, 1); 2911 return erased_chunks > dev->n_free_chunks / 2; 2912 } 2913 2914 /*-------------------- Data file manipulation -----------------*/ 2915 2916 static int yaffs_rd_data_obj(struct yaffs_obj *in, int inode_chunk, u8 * buffer) 2917 { 2918 int nand_chunk = yaffs_find_chunk_in_file(in, inode_chunk, NULL); 2919 2920 if (nand_chunk >= 0) 2921 return yaffs_rd_chunk_tags_nand(in->my_dev, nand_chunk, 2922 buffer, NULL); 2923 else { 2924 yaffs_trace(YAFFS_TRACE_NANDACCESS, 2925 "Chunk %d not found zero instead", 2926 nand_chunk); 2927 /* get sane (zero) data if you read a hole */ 2928 memset(buffer, 0, in->my_dev->data_bytes_per_chunk); 2929 return 0; 2930 } 2931 2932 } 2933 2934 void yaffs_chunk_del(struct yaffs_dev *dev, int chunk_id, int mark_flash, 2935 int lyn) 2936 { 2937 int block; 2938 int page; 2939 struct yaffs_ext_tags tags; 2940 struct yaffs_block_info *bi; 2941 2942 if (chunk_id <= 0) 2943 return; 2944 2945 dev->n_deletions++; 2946 block = chunk_id / dev->param.chunks_per_block; 2947 page = chunk_id % dev->param.chunks_per_block; 2948 2949 if (!yaffs_check_chunk_bit(dev, block, page)) 2950 yaffs_trace(YAFFS_TRACE_VERIFY, 2951 "Deleting invalid chunk %d", chunk_id); 2952 2953 bi = yaffs_get_block_info(dev, block); 2954 2955 yaffs2_update_oldest_dirty_seq(dev, block, bi); 2956 2957 yaffs_trace(YAFFS_TRACE_DELETION, 2958 "line %d delete of chunk %d", 2959 lyn, chunk_id); 2960 2961 if (!dev->param.is_yaffs2 && mark_flash && 2962 bi->block_state != YAFFS_BLOCK_STATE_COLLECTING) { 2963 2964 memset(&tags, 0, sizeof(tags)); 2965 tags.is_deleted = 1; 2966 yaffs_wr_chunk_tags_nand(dev, chunk_id, NULL, &tags); 2967 yaffs_handle_chunk_update(dev, chunk_id, &tags); 2968 } else { 2969 dev->n_unmarked_deletions++; 2970 } 2971 2972 /* Pull out of the management area. 2973 * If the whole block became dirty, this will kick off an erasure. 2974 */ 2975 if (bi->block_state == YAFFS_BLOCK_STATE_ALLOCATING || 2976 bi->block_state == YAFFS_BLOCK_STATE_FULL || 2977 bi->block_state == YAFFS_BLOCK_STATE_NEEDS_SCAN || 2978 bi->block_state == YAFFS_BLOCK_STATE_COLLECTING) { 2979 dev->n_free_chunks++; 2980 yaffs_clear_chunk_bit(dev, block, page); 2981 bi->pages_in_use--; 2982 2983 if (bi->pages_in_use == 0 && 2984 !bi->has_shrink_hdr && 2985 bi->block_state != YAFFS_BLOCK_STATE_ALLOCATING && 2986 bi->block_state != YAFFS_BLOCK_STATE_NEEDS_SCAN) { 2987 yaffs_block_became_dirty(dev, block); 2988 } 2989 } 2990 } 2991 2992 static int yaffs_wr_data_obj(struct yaffs_obj *in, int inode_chunk, 2993 const u8 *buffer, int n_bytes, int use_reserve) 2994 { 2995 /* Find old chunk Need to do this to get serial number 2996 * Write new one and patch into tree. 2997 * Invalidate old tags. 2998 */ 2999 3000 int prev_chunk_id; 3001 struct yaffs_ext_tags prev_tags; 3002 int new_chunk_id; 3003 struct yaffs_ext_tags new_tags; 3004 struct yaffs_dev *dev = in->my_dev; 3005 3006 yaffs_check_gc(dev, 0); 3007 3008 /* Get the previous chunk at this location in the file if it exists. 3009 * If it does not exist then put a zero into the tree. This creates 3010 * the tnode now, rather than later when it is harder to clean up. 
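 * The replacement chunk is written with a serial number one greater than
 * the old chunk's, so that if both copies are found later (e.g. after a
 * power loss) scanning can take the higher-serial copy as current.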
3011 */ 3012 prev_chunk_id = yaffs_find_chunk_in_file(in, inode_chunk, &prev_tags); 3013 if (prev_chunk_id < 1 && 3014 !yaffs_put_chunk_in_file(in, inode_chunk, 0, 0)) 3015 return 0; 3016 3017 /* Set up new tags */ 3018 memset(&new_tags, 0, sizeof(new_tags)); 3019 3020 new_tags.chunk_id = inode_chunk; 3021 new_tags.obj_id = in->obj_id; 3022 new_tags.serial_number = 3023 (prev_chunk_id > 0) ? prev_tags.serial_number + 1 : 1; 3024 new_tags.n_bytes = n_bytes; 3025 3026 if (n_bytes < 1 || n_bytes > dev->param.total_bytes_per_chunk) { 3027 yaffs_trace(YAFFS_TRACE_ERROR, 3028 "Writing %d bytes to chunk!!!!!!!!!", 3029 n_bytes); 3030 BUG(); 3031 } 3032 3033 new_chunk_id = 3034 yaffs_write_new_chunk(dev, buffer, &new_tags, use_reserve); 3035 3036 if (new_chunk_id > 0) { 3037 yaffs_put_chunk_in_file(in, inode_chunk, new_chunk_id, 0); 3038 3039 if (prev_chunk_id > 0) 3040 yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__); 3041 3042 yaffs_verify_file_sane(in); 3043 } 3044 return new_chunk_id; 3045 3046 } 3047 3048 3049 3050 static int yaffs_do_xattrib_mod(struct yaffs_obj *obj, int set, 3051 const YCHAR *name, const void *value, int size, 3052 int flags) 3053 { 3054 struct yaffs_xattr_mod xmod; 3055 int result; 3056 3057 xmod.set = set; 3058 xmod.name = name; 3059 xmod.data = value; 3060 xmod.size = size; 3061 xmod.flags = flags; 3062 xmod.result = -ENOSPC; 3063 3064 result = yaffs_update_oh(obj, NULL, 0, 0, 0, &xmod); 3065 3066 if (result > 0) 3067 return xmod.result; 3068 else 3069 return -ENOSPC; 3070 } 3071 3072 static int yaffs_apply_xattrib_mod(struct yaffs_obj *obj, char *buffer, 3073 struct yaffs_xattr_mod *xmod) 3074 { 3075 int retval = 0; 3076 int x_offs = sizeof(struct yaffs_obj_hdr); 3077 struct yaffs_dev *dev = obj->my_dev; 3078 int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr); 3079 char *x_buffer = buffer + x_offs; 3080 3081 if (xmod->set) 3082 retval = 3083 nval_set(x_buffer, x_size, xmod->name, xmod->data, 3084 xmod->size, xmod->flags); 3085 else 3086 retval = nval_del(x_buffer, x_size, xmod->name); 3087 3088 obj->has_xattr = nval_hasvalues(x_buffer, x_size); 3089 obj->xattr_known = 1; 3090 xmod->result = retval; 3091 3092 return retval; 3093 } 3094 3095 static int yaffs_do_xattrib_fetch(struct yaffs_obj *obj, const YCHAR *name, 3096 void *value, int size) 3097 { 3098 char *buffer = NULL; 3099 int result; 3100 struct yaffs_ext_tags tags; 3101 struct yaffs_dev *dev = obj->my_dev; 3102 int x_offs = sizeof(struct yaffs_obj_hdr); 3103 int x_size = dev->data_bytes_per_chunk - sizeof(struct yaffs_obj_hdr); 3104 char *x_buffer; 3105 int retval = 0; 3106 3107 if (obj->hdr_chunk < 1) 3108 return -ENODATA; 3109 3110 /* If we know that the object has no xattribs then don't do all the 3111 * reading and parsing. 
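 * (has_xattr and xattr_known are cached on the object the first time the
 * header is parsed, so the common no-xattr case avoids the NAND read.)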
3112 */ 3113 if (obj->xattr_known && !obj->has_xattr) { 3114 if (name) 3115 return -ENODATA; 3116 else 3117 return 0; 3118 } 3119 3120 buffer = (char *)yaffs_get_temp_buffer(dev); 3121 if (!buffer) 3122 return -ENOMEM; 3123 3124 result = 3125 yaffs_rd_chunk_tags_nand(dev, obj->hdr_chunk, (u8 *) buffer, &tags); 3126 3127 if (result != YAFFS_OK) 3128 retval = -ENOENT; 3129 else { 3130 x_buffer = buffer + x_offs; 3131 3132 if (!obj->xattr_known) { 3133 obj->has_xattr = nval_hasvalues(x_buffer, x_size); 3134 obj->xattr_known = 1; 3135 } 3136 3137 if (name) 3138 retval = nval_get(x_buffer, x_size, name, value, size); 3139 else 3140 retval = nval_list(x_buffer, x_size, value, size); 3141 } 3142 yaffs_release_temp_buffer(dev, (u8 *) buffer); 3143 return retval; 3144 } 3145 3146 int yaffs_set_xattrib(struct yaffs_obj *obj, const YCHAR * name, 3147 const void *value, int size, int flags) 3148 { 3149 return yaffs_do_xattrib_mod(obj, 1, name, value, size, flags); 3150 } 3151 3152 int yaffs_remove_xattrib(struct yaffs_obj *obj, const YCHAR * name) 3153 { 3154 return yaffs_do_xattrib_mod(obj, 0, name, NULL, 0, 0); 3155 } 3156 3157 int yaffs_get_xattrib(struct yaffs_obj *obj, const YCHAR * name, void *value, 3158 int size) 3159 { 3160 return yaffs_do_xattrib_fetch(obj, name, value, size); 3161 } 3162 3163 int yaffs_list_xattrib(struct yaffs_obj *obj, char *buffer, int size) 3164 { 3165 return yaffs_do_xattrib_fetch(obj, NULL, buffer, size); 3166 } 3167 3168 static void yaffs_check_obj_details_loaded(struct yaffs_obj *in) 3169 { 3170 u8 *buf; 3171 struct yaffs_obj_hdr *oh; 3172 struct yaffs_dev *dev; 3173 struct yaffs_ext_tags tags; 3174 3175 if (!in || !in->lazy_loaded || in->hdr_chunk < 1) 3176 return; 3177 3178 dev = in->my_dev; 3179 in->lazy_loaded = 0; 3180 buf = yaffs_get_temp_buffer(dev); 3181 3182 yaffs_rd_chunk_tags_nand(dev, in->hdr_chunk, buf, &tags); 3183 oh = (struct yaffs_obj_hdr *)buf; 3184 3185 in->yst_mode = oh->yst_mode; 3186 yaffs_load_attribs(in, oh); 3187 yaffs_set_obj_name_from_oh(in, oh); 3188 3189 if (in->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) { 3190 in->variant.symlink_variant.alias = 3191 yaffs_clone_str(oh->alias); 3192 } 3193 yaffs_release_temp_buffer(dev, buf); 3194 } 3195 3196 static void yaffs_load_name_from_oh(struct yaffs_dev *dev, YCHAR *name, 3197 const YCHAR *oh_name, int buff_size) 3198 { 3199 #ifdef CONFIG_YAFFS_AUTO_UNICODE 3200 if (dev->param.auto_unicode) { 3201 if (*oh_name) { 3202 /* It is an ASCII name, do an ASCII to 3203 * unicode conversion */ 3204 const char *ascii_oh_name = (const char *)oh_name; 3205 int n = buff_size - 1; 3206 while (n > 0 && *ascii_oh_name) { 3207 *name = *ascii_oh_name; 3208 name++; 3209 ascii_oh_name++; 3210 n--; 3211 } 3212 } else { 3213 yaffs_strncpy(name, oh_name + 1, buff_size - 1); 3214 } 3215 } else { 3216 #else 3217 dev = dev; 3218 { 3219 #endif 3220 yaffs_strncpy(name, oh_name, buff_size - 1); 3221 } 3222 } 3223 3224 static void yaffs_load_oh_from_name(struct yaffs_dev *dev, YCHAR *oh_name, 3225 const YCHAR *name) 3226 { 3227 #ifdef CONFIG_YAFFS_AUTO_UNICODE 3228 3229 int is_ascii; 3230 YCHAR *w; 3231 3232 if (dev->param.auto_unicode) { 3233 3234 is_ascii = 1; 3235 w = name; 3236 3237 /* Figure out if the name will fit in ascii character set */ 3238 while (is_ascii && *w) { 3239 if ((*w) & 0xff00) 3240 is_ascii = 0; 3241 w++; 3242 } 3243 3244 if (is_ascii) { 3245 /* It is an ASCII name, so convert unicode to ascii */ 3246 char *ascii_oh_name = (char *)oh_name; 3247 int n = YAFFS_MAX_NAME_LENGTH - 1; 3248 while (n > 0 && 
*name) { 3249 *ascii_oh_name = *name; 3250 name++; 3251 ascii_oh_name++; 3252 n--; 3253 } 3254 } else { 3255 /* Unicode name, so save starting at the second YCHAR */ 3256 *oh_name = 0; 3257 yaffs_strncpy(oh_name + 1, name, YAFFS_MAX_NAME_LENGTH - 2); 3258 } 3259 } else { 3260 #else 3261 dev = dev; 3262 { 3263 #endif 3264 yaffs_strncpy(oh_name, name, YAFFS_MAX_NAME_LENGTH - 1); 3265 } 3266 } 3267 3268 /* UpdateObjectHeader updates the header on NAND for an object. 3269 * If name is not NULL, then that new name is used. 3270 */ 3271 int yaffs_update_oh(struct yaffs_obj *in, const YCHAR *name, int force, 3272 int is_shrink, int shadows, struct yaffs_xattr_mod *xmod) 3273 { 3274 3275 struct yaffs_block_info *bi; 3276 struct yaffs_dev *dev = in->my_dev; 3277 int prev_chunk_id; 3278 int ret_val = 0; 3279 int new_chunk_id; 3280 struct yaffs_ext_tags new_tags; 3281 struct yaffs_ext_tags old_tags; 3282 const YCHAR *alias = NULL; 3283 u8 *buffer = NULL; 3284 YCHAR old_name[YAFFS_MAX_NAME_LENGTH + 1]; 3285 struct yaffs_obj_hdr *oh = NULL; 3286 loff_t file_size = 0; 3287 3288 yaffs_strcpy(old_name, _Y("silly old name")); 3289 3290 if (in->fake && in != dev->root_dir && !force && !xmod) 3291 return ret_val; 3292 3293 yaffs_check_gc(dev, 0); 3294 yaffs_check_obj_details_loaded(in); 3295 3296 buffer = yaffs_get_temp_buffer(in->my_dev); 3297 oh = (struct yaffs_obj_hdr *)buffer; 3298 3299 prev_chunk_id = in->hdr_chunk; 3300 3301 if (prev_chunk_id > 0) { 3302 yaffs_rd_chunk_tags_nand(dev, prev_chunk_id, 3303 buffer, &old_tags); 3304 3305 yaffs_verify_oh(in, oh, &old_tags, 0); 3306 memcpy(old_name, oh->name, sizeof(oh->name)); 3307 memset(buffer, 0xff, sizeof(struct yaffs_obj_hdr)); 3308 } else { 3309 memset(buffer, 0xff, dev->data_bytes_per_chunk); 3310 } 3311 3312 oh->type = in->variant_type; 3313 oh->yst_mode = in->yst_mode; 3314 oh->shadows_obj = oh->inband_shadowed_obj_id = shadows; 3315 3316 yaffs_load_attribs_oh(oh, in); 3317 3318 if (in->parent) 3319 oh->parent_obj_id = in->parent->obj_id; 3320 else 3321 oh->parent_obj_id = 0; 3322 3323 if (name && *name) { 3324 memset(oh->name, 0, sizeof(oh->name)); 3325 yaffs_load_oh_from_name(dev, oh->name, name); 3326 } else if (prev_chunk_id > 0) { 3327 memcpy(oh->name, old_name, sizeof(oh->name)); 3328 } else { 3329 memset(oh->name, 0, sizeof(oh->name)); 3330 } 3331 3332 oh->is_shrink = is_shrink; 3333 3334 switch (in->variant_type) { 3335 case YAFFS_OBJECT_TYPE_UNKNOWN: 3336 /* Should not happen */ 3337 break; 3338 case YAFFS_OBJECT_TYPE_FILE: 3339 if (oh->parent_obj_id != YAFFS_OBJECTID_DELETED && 3340 oh->parent_obj_id != YAFFS_OBJECTID_UNLINKED) 3341 file_size = in->variant.file_variant.file_size; 3342 yaffs_oh_size_load(oh, file_size); 3343 break; 3344 case YAFFS_OBJECT_TYPE_HARDLINK: 3345 oh->equiv_id = in->variant.hardlink_variant.equiv_id; 3346 break; 3347 case YAFFS_OBJECT_TYPE_SPECIAL: 3348 /* Do nothing */ 3349 break; 3350 case YAFFS_OBJECT_TYPE_DIRECTORY: 3351 /* Do nothing */ 3352 break; 3353 case YAFFS_OBJECT_TYPE_SYMLINK: 3354 alias = in->variant.symlink_variant.alias; 3355 if (!alias) 3356 alias = _Y("no alias"); 3357 yaffs_strncpy(oh->alias, alias, YAFFS_MAX_ALIAS_LENGTH); 3358 oh->alias[YAFFS_MAX_ALIAS_LENGTH] = 0; 3359 break; 3360 } 3361 3362 /* process any xattrib modifications */ 3363 if (xmod) 3364 yaffs_apply_xattrib_mod(in, (char *)buffer, xmod); 3365 3366 /* Tags */ 3367 memset(&new_tags, 0, sizeof(new_tags)); 3368 in->serial++; 3369 new_tags.chunk_id = 0; 3370 new_tags.obj_id = in->obj_id; 3371 new_tags.serial_number = in->serial; 3372 
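	/* chunk_id 0 identifies this chunk as an object header rather than file data */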
3373 /* Add extra info for file header */ 3374 new_tags.extra_available = 1; 3375 new_tags.extra_parent_id = oh->parent_obj_id; 3376 new_tags.extra_file_size = file_size; 3377 new_tags.extra_is_shrink = oh->is_shrink; 3378 new_tags.extra_equiv_id = oh->equiv_id; 3379 new_tags.extra_shadows = (oh->shadows_obj > 0) ? 1 : 0; 3380 new_tags.extra_obj_type = in->variant_type; 3381 yaffs_verify_oh(in, oh, &new_tags, 1); 3382 3383 /* Create new chunk in NAND */ 3384 new_chunk_id = 3385 yaffs_write_new_chunk(dev, buffer, &new_tags, 3386 (prev_chunk_id > 0) ? 1 : 0); 3387 3388 if (buffer) 3389 yaffs_release_temp_buffer(dev, buffer); 3390 3391 if (new_chunk_id < 0) 3392 return new_chunk_id; 3393 3394 in->hdr_chunk = new_chunk_id; 3395 3396 if (prev_chunk_id > 0) 3397 yaffs_chunk_del(dev, prev_chunk_id, 1, __LINE__); 3398 3399 if (!yaffs_obj_cache_dirty(in)) 3400 in->dirty = 0; 3401 3402 /* If this was a shrink, then mark the block 3403 * that the chunk lives on */ 3404 if (is_shrink) { 3405 bi = yaffs_get_block_info(in->my_dev, 3406 new_chunk_id / 3407 in->my_dev->param.chunks_per_block); 3408 bi->has_shrink_hdr = 1; 3409 } 3410 3411 3412 return new_chunk_id; 3413 } 3414 3415 /*--------------------- File read/write ------------------------ 3416 * Read and write have very similar structures. 3417 * In general the read/write has three parts to it 3418 * An incomplete chunk to start with (if the read/write is not chunk-aligned) 3419 * Some complete chunks 3420 * An incomplete chunk to end off with 3421 * 3422 * Curve-balls: the first chunk might also be the last chunk. 3423 */ 3424 3425 int yaffs_file_rd(struct yaffs_obj *in, u8 * buffer, loff_t offset, int n_bytes) 3426 { 3427 int chunk; 3428 u32 start; 3429 int n_copy; 3430 int n = n_bytes; 3431 int n_done = 0; 3432 struct yaffs_cache *cache; 3433 struct yaffs_dev *dev; 3434 3435 dev = in->my_dev; 3436 3437 while (n > 0) { 3438 yaffs_addr_to_chunk(dev, offset, &chunk, &start); 3439 chunk++; 3440 3441 /* OK now check for the curveball where the start and end are in 3442 * the same chunk. 3443 */ 3444 if ((start + n) < dev->data_bytes_per_chunk) 3445 n_copy = n; 3446 else 3447 n_copy = dev->data_bytes_per_chunk - start; 3448 3449 cache = yaffs_find_chunk_cache(in, chunk); 3450 3451 /* If the chunk is already in the cache or it is less than 3452 * a whole chunk or we're using inband tags then use the cache 3453 * (if there is caching) else bypass the cache. 3454 */ 3455 if (cache || n_copy != dev->data_bytes_per_chunk || 3456 dev->param.inband_tags) { 3457 if (dev->param.n_caches > 0) { 3458 3459 /* If we can't find the data in the cache, 3460 * then load it up. */ 3461 3462 if (!cache) { 3463 cache = 3464 yaffs_grab_chunk_cache(in->my_dev); 3465 cache->object = in; 3466 cache->chunk_id = chunk; 3467 cache->dirty = 0; 3468 cache->locked = 0; 3469 yaffs_rd_data_obj(in, chunk, 3470 cache->data); 3471 cache->n_bytes = 0; 3472 } 3473 3474 yaffs_use_cache(dev, cache, 0); 3475 3476 cache->locked = 1; 3477 3478 memcpy(buffer, &cache->data[start], n_copy); 3479 3480 cache->locked = 0; 3481 } else { 3482 /* Read into the local buffer then copy.. */ 3483 3484 u8 *local_buffer = 3485 yaffs_get_temp_buffer(dev); 3486 yaffs_rd_data_obj(in, chunk, local_buffer); 3487 3488 memcpy(buffer, &local_buffer[start], n_copy); 3489 3490 yaffs_release_temp_buffer(dev, local_buffer); 3491 } 3492 } else { 3493 /* A full chunk. Read directly into the buffer. 
*/ 3494 yaffs_rd_data_obj(in, chunk, buffer); 3495 } 3496 n -= n_copy; 3497 offset += n_copy; 3498 buffer += n_copy; 3499 n_done += n_copy; 3500 } 3501 return n_done; 3502 } 3503 3504 int yaffs_do_file_wr(struct yaffs_obj *in, const u8 *buffer, loff_t offset, 3505 int n_bytes, int write_through) 3506 { 3507 3508 int chunk; 3509 u32 start; 3510 int n_copy; 3511 int n = n_bytes; 3512 int n_done = 0; 3513 int n_writeback; 3514 loff_t start_write = offset; 3515 int chunk_written = 0; 3516 u32 n_bytes_read; 3517 loff_t chunk_start; 3518 struct yaffs_dev *dev; 3519 3520 dev = in->my_dev; 3521 3522 while (n > 0 && chunk_written >= 0) { 3523 yaffs_addr_to_chunk(dev, offset, &chunk, &start); 3524 3525 if (((loff_t)chunk) * 3526 dev->data_bytes_per_chunk + start != offset || 3527 start >= dev->data_bytes_per_chunk) { 3528 yaffs_trace(YAFFS_TRACE_ERROR, 3529 "AddrToChunk of offset %lld gives chunk %d start %d", 3530 offset, chunk, start); 3531 } 3532 chunk++; /* File pos to chunk in file offset */ 3533 3534 /* OK now check for the curveball where the start and end are in 3535 * the same chunk. 3536 */ 3537 3538 if ((start + n) < dev->data_bytes_per_chunk) { 3539 n_copy = n; 3540 3541 /* Now calculate how many bytes to write back.... 3542 * If we're overwriting and not writing to then end of 3543 * file then we need to write back as much as was there 3544 * before. 3545 */ 3546 3547 chunk_start = (((loff_t)(chunk - 1)) * 3548 dev->data_bytes_per_chunk); 3549 3550 if (chunk_start > in->variant.file_variant.file_size) 3551 n_bytes_read = 0; /* Past end of file */ 3552 else 3553 n_bytes_read = 3554 in->variant.file_variant.file_size - 3555 chunk_start; 3556 3557 if (n_bytes_read > dev->data_bytes_per_chunk) 3558 n_bytes_read = dev->data_bytes_per_chunk; 3559 3560 n_writeback = 3561 (n_bytes_read > 3562 (start + n)) ? n_bytes_read : (start + n); 3563 3564 if (n_writeback < 0 || 3565 n_writeback > dev->data_bytes_per_chunk) 3566 BUG(); 3567 3568 } else { 3569 n_copy = dev->data_bytes_per_chunk - start; 3570 n_writeback = dev->data_bytes_per_chunk; 3571 } 3572 3573 if (n_copy != dev->data_bytes_per_chunk || 3574 dev->param.inband_tags) { 3575 /* An incomplete start or end chunk (or maybe both 3576 * start and end chunk), or we're using inband tags, 3577 * so we want to use the cache buffers. 3578 */ 3579 if (dev->param.n_caches > 0) { 3580 struct yaffs_cache *cache; 3581 3582 /* If we can't find the data in the cache, then 3583 * load the cache */ 3584 cache = yaffs_find_chunk_cache(in, chunk); 3585 3586 if (!cache && 3587 yaffs_check_alloc_available(dev, 1)) { 3588 cache = yaffs_grab_chunk_cache(dev); 3589 cache->object = in; 3590 cache->chunk_id = chunk; 3591 cache->dirty = 0; 3592 cache->locked = 0; 3593 yaffs_rd_data_obj(in, chunk, 3594 cache->data); 3595 } else if (cache && 3596 !cache->dirty && 3597 !yaffs_check_alloc_available(dev, 3598 1)) { 3599 /* Drop the cache if it was a read cache 3600 * item and no space check has been made 3601 * for it. 
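 * Presumably this is to fail the write now rather than dirty a cache
 * entry that could not be flushed later for lack of space.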
3602 */ 3603 cache = NULL; 3604 } 3605 3606 if (cache) { 3607 yaffs_use_cache(dev, cache, 1); 3608 cache->locked = 1; 3609 3610 memcpy(&cache->data[start], buffer, 3611 n_copy); 3612 3613 cache->locked = 0; 3614 cache->n_bytes = n_writeback; 3615 3616 if (write_through) { 3617 chunk_written = 3618 yaffs_wr_data_obj 3619 (cache->object, 3620 cache->chunk_id, 3621 cache->data, 3622 cache->n_bytes, 1); 3623 cache->dirty = 0; 3624 } 3625 } else { 3626 chunk_written = -1; /* fail write */ 3627 } 3628 } else { 3629 /* An incomplete start or end chunk (or maybe 3630 * both start and end chunk). Read into the 3631 * local buffer then copy over and write back. 3632 */ 3633 3634 u8 *local_buffer = yaffs_get_temp_buffer(dev); 3635 3636 yaffs_rd_data_obj(in, chunk, local_buffer); 3637 memcpy(&local_buffer[start], buffer, n_copy); 3638 3639 chunk_written = 3640 yaffs_wr_data_obj(in, chunk, 3641 local_buffer, 3642 n_writeback, 0); 3643 3644 yaffs_release_temp_buffer(dev, local_buffer); 3645 } 3646 } else { 3647 /* A full chunk. Write directly from the buffer. */ 3648 3649 chunk_written = 3650 yaffs_wr_data_obj(in, chunk, buffer, 3651 dev->data_bytes_per_chunk, 0); 3652 3653 /* Since we've overwritten the cached data, 3654 * we better invalidate it. */ 3655 yaffs_invalidate_chunk_cache(in, chunk); 3656 } 3657 3658 if (chunk_written >= 0) { 3659 n -= n_copy; 3660 offset += n_copy; 3661 buffer += n_copy; 3662 n_done += n_copy; 3663 } 3664 } 3665 3666 /* Update file object */ 3667 3668 if ((start_write + n_done) > in->variant.file_variant.file_size) 3669 in->variant.file_variant.file_size = (start_write + n_done); 3670 3671 in->dirty = 1; 3672 return n_done; 3673 } 3674 3675 int yaffs_wr_file(struct yaffs_obj *in, const u8 *buffer, loff_t offset, 3676 int n_bytes, int write_through) 3677 { 3678 yaffs2_handle_hole(in, offset); 3679 return yaffs_do_file_wr(in, buffer, offset, n_bytes, write_through); 3680 } 3681 3682 /* ---------------------- File resizing stuff ------------------ */ 3683 3684 static void yaffs_prune_chunks(struct yaffs_obj *in, loff_t new_size) 3685 { 3686 3687 struct yaffs_dev *dev = in->my_dev; 3688 loff_t old_size = in->variant.file_variant.file_size; 3689 int i; 3690 int chunk_id; 3691 u32 dummy; 3692 int last_del; 3693 int start_del; 3694 3695 if (old_size > 0) 3696 yaffs_addr_to_chunk(dev, old_size - 1, &last_del, &dummy); 3697 else 3698 last_del = 0; 3699 3700 yaffs_addr_to_chunk(dev, new_size + dev->data_bytes_per_chunk - 1, 3701 &start_del, &dummy); 3702 last_del++; 3703 start_del++; 3704 3705 /* Delete backwards so that we don't end up with holes if 3706 * power is lost part-way through the operation. 3707 */ 3708 for (i = last_del; i >= start_del; i--) { 3709 /* NB this could be optimised somewhat, 3710 * eg. 
could retrieve the tags and write them without 3711 * using yaffs_chunk_del 3712 */ 3713 3714 chunk_id = yaffs_find_del_file_chunk(in, i, NULL); 3715 3716 if (chunk_id < 1) 3717 continue; 3718 3719 if (chunk_id < 3720 (dev->internal_start_block * dev->param.chunks_per_block) || 3721 chunk_id >= 3722 ((dev->internal_end_block + 1) * 3723 dev->param.chunks_per_block)) { 3724 yaffs_trace(YAFFS_TRACE_ALWAYS, 3725 "Found daft chunk_id %d for %d", 3726 chunk_id, i); 3727 } else { 3728 in->n_data_chunks--; 3729 yaffs_chunk_del(dev, chunk_id, 1, __LINE__); 3730 } 3731 } 3732 } 3733 3734 void yaffs_resize_file_down(struct yaffs_obj *obj, loff_t new_size) 3735 { 3736 int new_full; 3737 u32 new_partial; 3738 struct yaffs_dev *dev = obj->my_dev; 3739 3740 yaffs_addr_to_chunk(dev, new_size, &new_full, &new_partial); 3741 3742 yaffs_prune_chunks(obj, new_size); 3743 3744 if (new_partial != 0) { 3745 int last_chunk = 1 + new_full; 3746 u8 *local_buffer = yaffs_get_temp_buffer(dev); 3747 3748 /* Rewrite the last chunk with its new size and zero pad */ 3749 yaffs_rd_data_obj(obj, last_chunk, local_buffer); 3750 memset(local_buffer + new_partial, 0, 3751 dev->data_bytes_per_chunk - new_partial); 3752 3753 yaffs_wr_data_obj(obj, last_chunk, local_buffer, 3754 new_partial, 1); 3755 3756 yaffs_release_temp_buffer(dev, local_buffer); 3757 } 3758 3759 obj->variant.file_variant.file_size = new_size; 3760 3761 yaffs_prune_tree(dev, &obj->variant.file_variant); 3762 } 3763 3764 int yaffs_resize_file(struct yaffs_obj *in, loff_t new_size) 3765 { 3766 struct yaffs_dev *dev = in->my_dev; 3767 loff_t old_size = in->variant.file_variant.file_size; 3768 3769 yaffs_flush_file_cache(in); 3770 yaffs_invalidate_whole_cache(in); 3771 3772 yaffs_check_gc(dev, 0); 3773 3774 if (in->variant_type != YAFFS_OBJECT_TYPE_FILE) 3775 return YAFFS_FAIL; 3776 3777 if (new_size == old_size) 3778 return YAFFS_OK; 3779 3780 if (new_size > old_size) { 3781 yaffs2_handle_hole(in, new_size); 3782 in->variant.file_variant.file_size = new_size; 3783 } else { 3784 /* new_size < old_size */ 3785 yaffs_resize_file_down(in, new_size); 3786 } 3787 3788 /* Write a new object header to reflect the resize. 3789 * show we've shrunk the file, if need be 3790 * Do this only if the file is not in the deleted directories 3791 * and is not shadowed. 3792 */ 3793 if (in->parent && 3794 !in->is_shadowed && 3795 in->parent->obj_id != YAFFS_OBJECTID_UNLINKED && 3796 in->parent->obj_id != YAFFS_OBJECTID_DELETED) 3797 yaffs_update_oh(in, NULL, 0, 0, 0, NULL); 3798 3799 return YAFFS_OK; 3800 } 3801 3802 int yaffs_flush_file(struct yaffs_obj *in, int update_time, int data_sync) 3803 { 3804 if (!in->dirty) 3805 return YAFFS_OK; 3806 3807 yaffs_flush_file_cache(in); 3808 3809 if (data_sync) 3810 return YAFFS_OK; 3811 3812 if (update_time) 3813 yaffs_load_current_time(in, 0, 0); 3814 3815 return (yaffs_update_oh(in, NULL, 0, 0, 0, NULL) >= 0) ? 3816 YAFFS_OK : YAFFS_FAIL; 3817 } 3818 3819 3820 /* yaffs_del_file deletes the whole file data 3821 * and the inode associated with the file. 3822 * It does not delete the links associated with the file. 
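 * If the file still holds data chunks it is soft deleted (renamed into the
 * deleted directory and reclaimed lazily by the garbage collector); a file
 * already resized to zero is torn down immediately.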
3823 */ 3824 static int yaffs_unlink_file_if_needed(struct yaffs_obj *in) 3825 { 3826 int ret_val; 3827 int del_now = 0; 3828 struct yaffs_dev *dev = in->my_dev; 3829 3830 if (!in->my_inode) 3831 del_now = 1; 3832 3833 if (del_now) { 3834 ret_val = 3835 yaffs_change_obj_name(in, in->my_dev->del_dir, 3836 _Y("deleted"), 0, 0); 3837 yaffs_trace(YAFFS_TRACE_TRACING, 3838 "yaffs: immediate deletion of file %d", 3839 in->obj_id); 3840 in->deleted = 1; 3841 in->my_dev->n_deleted_files++; 3842 if (dev->param.disable_soft_del || dev->param.is_yaffs2) 3843 yaffs_resize_file(in, 0); 3844 yaffs_soft_del_file(in); 3845 } else { 3846 ret_val = 3847 yaffs_change_obj_name(in, in->my_dev->unlinked_dir, 3848 _Y("unlinked"), 0, 0); 3849 } 3850 return ret_val; 3851 } 3852 3853 int yaffs_del_file(struct yaffs_obj *in) 3854 { 3855 int ret_val = YAFFS_OK; 3856 int deleted; /* Need to cache value on stack if in is freed */ 3857 struct yaffs_dev *dev = in->my_dev; 3858 3859 if (dev->param.disable_soft_del || dev->param.is_yaffs2) 3860 yaffs_resize_file(in, 0); 3861 3862 if (in->n_data_chunks > 0) { 3863 /* Use soft deletion if there is data in the file. 3864 * That won't be the case if it has been resized to zero. 3865 */ 3866 if (!in->unlinked) 3867 ret_val = yaffs_unlink_file_if_needed(in); 3868 3869 deleted = in->deleted; 3870 3871 if (ret_val == YAFFS_OK && in->unlinked && !in->deleted) { 3872 in->deleted = 1; 3873 deleted = 1; 3874 in->my_dev->n_deleted_files++; 3875 yaffs_soft_del_file(in); 3876 } 3877 return deleted ? YAFFS_OK : YAFFS_FAIL; 3878 } else { 3879 /* The file has no data chunks so we toss it immediately */ 3880 yaffs_free_tnode(in->my_dev, in->variant.file_variant.top); 3881 in->variant.file_variant.top = NULL; 3882 yaffs_generic_obj_del(in); 3883 3884 return YAFFS_OK; 3885 } 3886 } 3887 3888 int yaffs_is_non_empty_dir(struct yaffs_obj *obj) 3889 { 3890 return (obj && 3891 obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) && 3892 !(list_empty(&obj->variant.dir_variant.children)); 3893 } 3894 3895 static int yaffs_del_dir(struct yaffs_obj *obj) 3896 { 3897 /* First check that the directory is empty. 
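 * A non-empty directory is never deleted here; the caller just gets
 * YAFFS_FAIL (typically surfacing as ENOTEMPTY higher up).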
*/ 3898 if (yaffs_is_non_empty_dir(obj)) 3899 return YAFFS_FAIL; 3900 3901 return yaffs_generic_obj_del(obj); 3902 } 3903 3904 static int yaffs_del_symlink(struct yaffs_obj *in) 3905 { 3906 kfree(in->variant.symlink_variant.alias); 3907 in->variant.symlink_variant.alias = NULL; 3908 3909 return yaffs_generic_obj_del(in); 3910 } 3911 3912 static int yaffs_del_link(struct yaffs_obj *in) 3913 { 3914 /* remove this hardlink from the list associated with the equivalent 3915 * object 3916 */ 3917 list_del_init(&in->hard_links); 3918 return yaffs_generic_obj_del(in); 3919 } 3920 3921 int yaffs_del_obj(struct yaffs_obj *obj) 3922 { 3923 int ret_val = -1; 3924 3925 switch (obj->variant_type) { 3926 case YAFFS_OBJECT_TYPE_FILE: 3927 ret_val = yaffs_del_file(obj); 3928 break; 3929 case YAFFS_OBJECT_TYPE_DIRECTORY: 3930 if (!list_empty(&obj->variant.dir_variant.dirty)) { 3931 yaffs_trace(YAFFS_TRACE_BACKGROUND, 3932 "Remove object %d from dirty directories", 3933 obj->obj_id); 3934 list_del_init(&obj->variant.dir_variant.dirty); 3935 } 3936 return yaffs_del_dir(obj); 3937 break; 3938 case YAFFS_OBJECT_TYPE_SYMLINK: 3939 ret_val = yaffs_del_symlink(obj); 3940 break; 3941 case YAFFS_OBJECT_TYPE_HARDLINK: 3942 ret_val = yaffs_del_link(obj); 3943 break; 3944 case YAFFS_OBJECT_TYPE_SPECIAL: 3945 ret_val = yaffs_generic_obj_del(obj); 3946 break; 3947 case YAFFS_OBJECT_TYPE_UNKNOWN: 3948 ret_val = 0; 3949 break; /* should not happen. */ 3950 } 3951 return ret_val; 3952 } 3953 3954 static int yaffs_unlink_worker(struct yaffs_obj *obj) 3955 { 3956 int del_now = 0; 3957 3958 if (!obj) 3959 return YAFFS_FAIL; 3960 3961 if (!obj->my_inode) 3962 del_now = 1; 3963 3964 yaffs_update_parent(obj->parent); 3965 3966 if (obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) { 3967 return yaffs_del_link(obj); 3968 } else if (!list_empty(&obj->hard_links)) { 3969 /* Curve ball: We're unlinking an object that has a hardlink. 3970 * 3971 * This problem arises because we are not strictly following 3972 * The Linux link/inode model. 3973 * 3974 * We can't really delete the object. 3975 * Instead, we do the following: 3976 * - Select a hardlink. 3977 * - Unhook it from the hard links 3978 * - Move it from its parent directory so that the rename works. 3979 * - Rename the object to the hardlink's name. 
3980 * - Delete the hardlink 3981 */ 3982 3983 struct yaffs_obj *hl; 3984 struct yaffs_obj *parent; 3985 int ret_val; 3986 YCHAR name[YAFFS_MAX_NAME_LENGTH + 1]; 3987 3988 hl = list_entry(obj->hard_links.next, struct yaffs_obj, 3989 hard_links); 3990 3991 yaffs_get_obj_name(hl, name, YAFFS_MAX_NAME_LENGTH + 1); 3992 parent = hl->parent; 3993 3994 list_del_init(&hl->hard_links); 3995 3996 yaffs_add_obj_to_dir(obj->my_dev->unlinked_dir, hl); 3997 3998 ret_val = yaffs_change_obj_name(obj, parent, name, 0, 0); 3999 4000 if (ret_val == YAFFS_OK) 4001 ret_val = yaffs_generic_obj_del(hl); 4002 4003 return ret_val; 4004 4005 } else if (del_now) { 4006 switch (obj->variant_type) { 4007 case YAFFS_OBJECT_TYPE_FILE: 4008 return yaffs_del_file(obj); 4009 break; 4010 case YAFFS_OBJECT_TYPE_DIRECTORY: 4011 list_del_init(&obj->variant.dir_variant.dirty); 4012 return yaffs_del_dir(obj); 4013 break; 4014 case YAFFS_OBJECT_TYPE_SYMLINK: 4015 return yaffs_del_symlink(obj); 4016 break; 4017 case YAFFS_OBJECT_TYPE_SPECIAL: 4018 return yaffs_generic_obj_del(obj); 4019 break; 4020 case YAFFS_OBJECT_TYPE_HARDLINK: 4021 case YAFFS_OBJECT_TYPE_UNKNOWN: 4022 default: 4023 return YAFFS_FAIL; 4024 } 4025 } else if (yaffs_is_non_empty_dir(obj)) { 4026 return YAFFS_FAIL; 4027 } else { 4028 return yaffs_change_obj_name(obj, obj->my_dev->unlinked_dir, 4029 _Y("unlinked"), 0, 0); 4030 } 4031 } 4032 4033 static int yaffs_unlink_obj(struct yaffs_obj *obj) 4034 { 4035 if (obj && obj->unlink_allowed) 4036 return yaffs_unlink_worker(obj); 4037 4038 return YAFFS_FAIL; 4039 } 4040 4041 int yaffs_unlinker(struct yaffs_obj *dir, const YCHAR *name) 4042 { 4043 struct yaffs_obj *obj; 4044 4045 obj = yaffs_find_by_name(dir, name); 4046 return yaffs_unlink_obj(obj); 4047 } 4048 4049 /* Note: 4050 * If old_name is NULL then we take old_dir as the object to be renamed. 4051 */ 4052 int yaffs_rename_obj(struct yaffs_obj *old_dir, const YCHAR *old_name, 4053 struct yaffs_obj *new_dir, const YCHAR *new_name) 4054 { 4055 struct yaffs_obj *obj = NULL; 4056 struct yaffs_obj *existing_target = NULL; 4057 int force = 0; 4058 int result; 4059 struct yaffs_dev *dev; 4060 4061 if (!old_dir || old_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { 4062 BUG(); 4063 return YAFFS_FAIL; 4064 } 4065 if (!new_dir || new_dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { 4066 BUG(); 4067 return YAFFS_FAIL; 4068 } 4069 4070 dev = old_dir->my_dev; 4071 4072 #ifdef CONFIG_YAFFS_CASE_INSENSITIVE 4073 /* Special case for case insemsitive systems. 4074 * While look-up is case insensitive, the name isn't. 4075 * Therefore we might want to change x.txt to X.txt 4076 */ 4077 if (old_dir == new_dir && 4078 old_name && new_name && 4079 yaffs_strcmp(old_name, new_name) == 0) 4080 force = 1; 4081 #endif 4082 4083 if (yaffs_strnlen(new_name, YAFFS_MAX_NAME_LENGTH + 1) > 4084 YAFFS_MAX_NAME_LENGTH) 4085 /* ENAMETOOLONG */ 4086 return YAFFS_FAIL; 4087 4088 if (old_name) 4089 obj = yaffs_find_by_name(old_dir, old_name); 4090 else{ 4091 obj = old_dir; 4092 old_dir = obj->parent; 4093 } 4094 4095 if (obj && obj->rename_allowed) { 4096 /* Now handle an existing target, if there is one */ 4097 existing_target = yaffs_find_by_name(new_dir, new_name); 4098 if (yaffs_is_non_empty_dir(existing_target)) { 4099 return YAFFS_FAIL; /* ENOTEMPTY */ 4100 } else if (existing_target && existing_target != obj) { 4101 /* Nuke the target first, using shadowing, 4102 * but only if it isn't the same object. 
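 * (Shadowing records the doomed target's obj_id in the renamed object's
 * new header, so a scan after an untimely power loss can still resolve
 * the rename to a single surviving target.)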
4103 * 4104 * Note we must disable gc here otherwise it can mess 4105 * up the shadowing. 4106 * 4107 */ 4108 dev->gc_disable = 1; 4109 yaffs_change_obj_name(obj, new_dir, new_name, force, 4110 existing_target->obj_id); 4111 existing_target->is_shadowed = 1; 4112 yaffs_unlink_obj(existing_target); 4113 dev->gc_disable = 0; 4114 } 4115 4116 result = yaffs_change_obj_name(obj, new_dir, new_name, 1, 0); 4117 4118 yaffs_update_parent(old_dir); 4119 if (new_dir != old_dir) 4120 yaffs_update_parent(new_dir); 4121 4122 return result; 4123 } 4124 return YAFFS_FAIL; 4125 } 4126 4127 /*----------------------- Initialisation Scanning ---------------------- */ 4128 4129 void yaffs_handle_shadowed_obj(struct yaffs_dev *dev, int obj_id, 4130 int backward_scanning) 4131 { 4132 struct yaffs_obj *obj; 4133 4134 if (backward_scanning) { 4135 /* Handle YAFFS2 case (backward scanning) 4136 * If the shadowed object exists then ignore. 4137 */ 4138 obj = yaffs_find_by_number(dev, obj_id); 4139 if (obj) 4140 return; 4141 } 4142 4143 /* Let's create it (if it does not exist) assuming it is a file so that 4144 * it can do shrinking etc. 4145 * We put it in unlinked dir to be cleaned up after the scanning 4146 */ 4147 obj = 4148 yaffs_find_or_create_by_number(dev, obj_id, YAFFS_OBJECT_TYPE_FILE); 4149 if (!obj) 4150 return; 4151 obj->is_shadowed = 1; 4152 yaffs_add_obj_to_dir(dev->unlinked_dir, obj); 4153 obj->variant.file_variant.shrink_size = 0; 4154 obj->valid = 1; /* So that we don't read any other info. */ 4155 } 4156 4157 void yaffs_link_fixup(struct yaffs_dev *dev, struct list_head *hard_list) 4158 { 4159 struct list_head *lh; 4160 struct list_head *save; 4161 struct yaffs_obj *hl; 4162 struct yaffs_obj *in; 4163 4164 list_for_each_safe(lh, save, hard_list) { 4165 hl = list_entry(lh, struct yaffs_obj, hard_links); 4166 in = yaffs_find_by_number(dev, 4167 hl->variant.hardlink_variant.equiv_id); 4168 4169 if (in) { 4170 /* Add the hardlink pointers */ 4171 hl->variant.hardlink_variant.equiv_obj = in; 4172 list_add(&hl->hard_links, &in->hard_links); 4173 } else { 4174 /* Todo Need to report/handle this better. 4175 * Got a problem... hardlink to a non-existant object 4176 */ 4177 hl->variant.hardlink_variant.equiv_obj = NULL; 4178 INIT_LIST_HEAD(&hl->hard_links); 4179 } 4180 } 4181 } 4182 4183 static void yaffs_strip_deleted_objs(struct yaffs_dev *dev) 4184 { 4185 /* 4186 * Sort out state of unlinked and deleted objects after scanning. 4187 */ 4188 struct list_head *i; 4189 struct list_head *n; 4190 struct yaffs_obj *l; 4191 4192 if (dev->read_only) 4193 return; 4194 4195 /* Soft delete all the unlinked files */ 4196 list_for_each_safe(i, n, 4197 &dev->unlinked_dir->variant.dir_variant.children) { 4198 l = list_entry(i, struct yaffs_obj, siblings); 4199 yaffs_del_obj(l); 4200 } 4201 4202 list_for_each_safe(i, n, &dev->del_dir->variant.dir_variant.children) { 4203 l = list_entry(i, struct yaffs_obj, siblings); 4204 yaffs_del_obj(l); 4205 } 4206 } 4207 4208 /* 4209 * This code iterates through all the objects making sure that they are rooted. 4210 * Any unrooted objects are re-rooted in lost+found. 4211 * An object needs to be in one of: 4212 * - Directly under deleted, unlinked 4213 * - Directly or indirectly under root. 
4214 * 4215 * Note: 4216 * This code assumes that we don't ever change the current relationships 4217 * between directories: 4218 * root_dir->parent == unlinked_dir->parent == del_dir->parent == NULL 4219 * lost-n-found->parent == root_dir 4220 * 4221 * This fixes the problem where directories might have inadvertently been 4222 * deleted leaving the object "hanging" without being rooted in the 4223 * directory tree. 4224 */ 4225 4226 static int yaffs_has_null_parent(struct yaffs_dev *dev, struct yaffs_obj *obj) 4227 { 4228 return (obj == dev->del_dir || 4229 obj == dev->unlinked_dir || obj == dev->root_dir); 4230 } 4231 4232 static void yaffs_fix_hanging_objs(struct yaffs_dev *dev) 4233 { 4234 struct yaffs_obj *obj; 4235 struct yaffs_obj *parent; 4236 int i; 4237 struct list_head *lh; 4238 struct list_head *n; 4239 int depth_limit; 4240 int hanging; 4241 4242 if (dev->read_only) 4243 return; 4244 4245 /* Iterate through the objects in each hash entry, 4246 * looking at each object. 4247 * Make sure it is rooted. 4248 */ 4249 4250 for (i = 0; i < YAFFS_NOBJECT_BUCKETS; i++) { 4251 list_for_each_safe(lh, n, &dev->obj_bucket[i].list) { 4252 obj = list_entry(lh, struct yaffs_obj, hash_link); 4253 parent = obj->parent; 4254 4255 if (yaffs_has_null_parent(dev, obj)) { 4256 /* These directories are not hanging */ 4257 hanging = 0; 4258 } else if (!parent || 4259 parent->variant_type != 4260 YAFFS_OBJECT_TYPE_DIRECTORY) { 4261 hanging = 1; 4262 } else if (yaffs_has_null_parent(dev, parent)) { 4263 hanging = 0; 4264 } else { 4265 /* 4266 * Need to follow the parent chain to 4267 * see if it is hanging. 4268 */ 4269 hanging = 0; 4270 depth_limit = 100; 4271 4272 while (parent != dev->root_dir && 4273 parent->parent && 4274 parent->parent->variant_type == 4275 YAFFS_OBJECT_TYPE_DIRECTORY && 4276 depth_limit > 0) { 4277 parent = parent->parent; 4278 depth_limit--; 4279 } 4280 if (parent != dev->root_dir) 4281 hanging = 1; 4282 } 4283 if (hanging) { 4284 yaffs_trace(YAFFS_TRACE_SCAN, 4285 "Hanging object %d moved to lost and found", 4286 obj->obj_id); 4287 yaffs_add_obj_to_dir(dev->lost_n_found, obj); 4288 } 4289 } 4290 } 4291 } 4292 4293 /* 4294 * Delete directory contents for cleaning up lost and found. 
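 * Sub-directories are recursed into first and each child is then unlinked
 * (rather than force deleted) so the normal unlink bookkeeping applies.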
4295 */ 4296 static void yaffs_del_dir_contents(struct yaffs_obj *dir) 4297 { 4298 struct yaffs_obj *obj; 4299 struct list_head *lh; 4300 struct list_head *n; 4301 4302 if (dir->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) 4303 BUG(); 4304 4305 list_for_each_safe(lh, n, &dir->variant.dir_variant.children) { 4306 obj = list_entry(lh, struct yaffs_obj, siblings); 4307 if (obj->variant_type == YAFFS_OBJECT_TYPE_DIRECTORY) 4308 yaffs_del_dir_contents(obj); 4309 yaffs_trace(YAFFS_TRACE_SCAN, 4310 "Deleting lost_found object %d", 4311 obj->obj_id); 4312 yaffs_unlink_obj(obj); 4313 } 4314 } 4315 4316 static void yaffs_empty_l_n_f(struct yaffs_dev *dev) 4317 { 4318 yaffs_del_dir_contents(dev->lost_n_found); 4319 } 4320 4321 4322 struct yaffs_obj *yaffs_find_by_name(struct yaffs_obj *directory, 4323 const YCHAR *name) 4324 { 4325 int sum; 4326 struct list_head *i; 4327 YCHAR buffer[YAFFS_MAX_NAME_LENGTH + 1]; 4328 struct yaffs_obj *l; 4329 4330 if (!name) 4331 return NULL; 4332 4333 if (!directory) { 4334 yaffs_trace(YAFFS_TRACE_ALWAYS, 4335 "tragedy: yaffs_find_by_name: null pointer directory" 4336 ); 4337 BUG(); 4338 return NULL; 4339 } 4340 if (directory->variant_type != YAFFS_OBJECT_TYPE_DIRECTORY) { 4341 yaffs_trace(YAFFS_TRACE_ALWAYS, 4342 "tragedy: yaffs_find_by_name: non-directory" 4343 ); 4344 BUG(); 4345 } 4346 4347 sum = yaffs_calc_name_sum(name); 4348 4349 list_for_each(i, &directory->variant.dir_variant.children) { 4350 l = list_entry(i, struct yaffs_obj, siblings); 4351 4352 if (l->parent != directory) 4353 BUG(); 4354 4355 yaffs_check_obj_details_loaded(l); 4356 4357 /* Special case for lost-n-found */ 4358 if (l->obj_id == YAFFS_OBJECTID_LOSTNFOUND) { 4359 if (!yaffs_strcmp(name, YAFFS_LOSTNFOUND_NAME)) 4360 return l; 4361 } else if (l->sum == sum || l->hdr_chunk <= 0) { 4362 /* LostnFound chunk called Objxxx 4363 * Do a real check 4364 */ 4365 yaffs_get_obj_name(l, buffer, 4366 YAFFS_MAX_NAME_LENGTH + 1); 4367 if (!yaffs_strncmp(name, buffer, YAFFS_MAX_NAME_LENGTH)) 4368 return l; 4369 } 4370 } 4371 return NULL; 4372 } 4373 4374 /* GetEquivalentObject dereferences any hard links to get to the 4375 * actual object. 4376 */ 4377 4378 struct yaffs_obj *yaffs_get_equivalent_obj(struct yaffs_obj *obj) 4379 { 4380 if (obj && obj->variant_type == YAFFS_OBJECT_TYPE_HARDLINK) { 4381 obj = obj->variant.hardlink_variant.equiv_obj; 4382 yaffs_check_obj_details_loaded(obj); 4383 } 4384 return obj; 4385 } 4386 4387 /* 4388 * A note or two on object names. 4389 * * If the object name is missing, we then make one up in the form objnnn 4390 * 4391 * * ASCII names are stored in the object header's name field from byte zero 4392 * * Unicode names are historically stored starting from byte zero. 4393 * 4394 * Then there are automatic Unicode names... 4395 * The purpose of these is to save names in a way that can be read as 4396 * ASCII or Unicode names as appropriate, thus allowing a Unicode and ASCII 4397 * system to share files. 4398 * 4399 * These automatic unicode are stored slightly differently... 4400 * - If the name can fit in the ASCII character space then they are saved as 4401 * ascii names as per above. 4402 * - If the name needs Unicode then the name is saved in Unicode 4403 * starting at oh->name[1]. 4404 4405 */ 4406 static void yaffs_fix_null_name(struct yaffs_obj *obj, YCHAR *name, 4407 int buffer_size) 4408 { 4409 /* Create an object name if we could not find one. 
*/ 4410 if (yaffs_strnlen(name, YAFFS_MAX_NAME_LENGTH) == 0) { 4411 YCHAR local_name[20]; 4412 YCHAR num_string[20]; 4413 YCHAR *x = &num_string[19]; 4414 unsigned v = obj->obj_id; 4415 num_string[19] = 0; 4416 while (v > 0) { 4417 x--; 4418 *x = '0' + (v % 10); 4419 v /= 10; 4420 } 4421 /* make up a name */ 4422 yaffs_strcpy(local_name, YAFFS_LOSTNFOUND_PREFIX); 4423 yaffs_strcat(local_name, x); 4424 yaffs_strncpy(name, local_name, buffer_size - 1); 4425 } 4426 } 4427 4428 int yaffs_get_obj_name(struct yaffs_obj *obj, YCHAR *name, int buffer_size) 4429 { 4430 memset(name, 0, buffer_size * sizeof(YCHAR)); 4431 yaffs_check_obj_details_loaded(obj); 4432 if (obj->obj_id == YAFFS_OBJECTID_LOSTNFOUND) { 4433 yaffs_strncpy(name, YAFFS_LOSTNFOUND_NAME, buffer_size - 1); 4434 } else if (obj->short_name[0]) { 4435 yaffs_strcpy(name, obj->short_name); 4436 } else if (obj->hdr_chunk > 0) { 4437 u8 *buffer = yaffs_get_temp_buffer(obj->my_dev); 4438 4439 struct yaffs_obj_hdr *oh = (struct yaffs_obj_hdr *)buffer; 4440 4441 memset(buffer, 0, obj->my_dev->data_bytes_per_chunk); 4442 4443 if (obj->hdr_chunk > 0) { 4444 yaffs_rd_chunk_tags_nand(obj->my_dev, 4445 obj->hdr_chunk, 4446 buffer, NULL); 4447 } 4448 yaffs_load_name_from_oh(obj->my_dev, name, oh->name, 4449 buffer_size); 4450 4451 yaffs_release_temp_buffer(obj->my_dev, buffer); 4452 } 4453 4454 yaffs_fix_null_name(obj, name, buffer_size); 4455 4456 return yaffs_strnlen(name, YAFFS_MAX_NAME_LENGTH); 4457 } 4458 4459 loff_t yaffs_get_obj_length(struct yaffs_obj *obj) 4460 { 4461 /* Dereference any hard linking */ 4462 obj = yaffs_get_equivalent_obj(obj); 4463 4464 if (obj->variant_type == YAFFS_OBJECT_TYPE_FILE) 4465 return obj->variant.file_variant.file_size; 4466 if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) { 4467 if (!obj->variant.symlink_variant.alias) 4468 return 0; 4469 return yaffs_strnlen(obj->variant.symlink_variant.alias, 4470 YAFFS_MAX_ALIAS_LENGTH); 4471 } else { 4472 /* Only a directory should drop through to here */ 4473 return obj->my_dev->data_bytes_per_chunk; 4474 } 4475 } 4476 4477 int yaffs_get_obj_link_count(struct yaffs_obj *obj) 4478 { 4479 int count = 0; 4480 struct list_head *i; 4481 4482 if (!obj->unlinked) 4483 count++; /* the object itself */ 4484 4485 list_for_each(i, &obj->hard_links) 4486 count++; /* add the hard links; */ 4487 4488 return count; 4489 } 4490 4491 int yaffs_get_obj_inode(struct yaffs_obj *obj) 4492 { 4493 obj = yaffs_get_equivalent_obj(obj); 4494 4495 return obj->obj_id; 4496 } 4497 4498 unsigned yaffs_get_obj_type(struct yaffs_obj *obj) 4499 { 4500 obj = yaffs_get_equivalent_obj(obj); 4501 4502 switch (obj->variant_type) { 4503 case YAFFS_OBJECT_TYPE_FILE: 4504 return DT_REG; 4505 break; 4506 case YAFFS_OBJECT_TYPE_DIRECTORY: 4507 return DT_DIR; 4508 break; 4509 case YAFFS_OBJECT_TYPE_SYMLINK: 4510 return DT_LNK; 4511 break; 4512 case YAFFS_OBJECT_TYPE_HARDLINK: 4513 return DT_REG; 4514 break; 4515 case YAFFS_OBJECT_TYPE_SPECIAL: 4516 if (S_ISFIFO(obj->yst_mode)) 4517 return DT_FIFO; 4518 if (S_ISCHR(obj->yst_mode)) 4519 return DT_CHR; 4520 if (S_ISBLK(obj->yst_mode)) 4521 return DT_BLK; 4522 if (S_ISSOCK(obj->yst_mode)) 4523 return DT_SOCK; 4524 return DT_REG; 4525 break; 4526 default: 4527 return DT_REG; 4528 break; 4529 } 4530 } 4531 4532 YCHAR *yaffs_get_symlink_alias(struct yaffs_obj *obj) 4533 { 4534 obj = yaffs_get_equivalent_obj(obj); 4535 if (obj->variant_type == YAFFS_OBJECT_TYPE_SYMLINK) 4536 return yaffs_clone_str(obj->variant.symlink_variant.alias); 4537 else 4538 return 
yaffs_clone_str(_Y("")); 4539 } 4540 4541 /*--------------------------- Initialisation code -------------------------- */ 4542 4543 static int yaffs_check_dev_fns(const struct yaffs_dev *dev) 4544 { 4545 /* Common functions, gotta have */ 4546 if (!dev->param.erase_fn || !dev->param.initialise_flash_fn) 4547 return 0; 4548 4549 /* Can use the "with tags" style interface for yaffs1 or yaffs2 */ 4550 if (dev->param.write_chunk_tags_fn && 4551 dev->param.read_chunk_tags_fn && 4552 !dev->param.write_chunk_fn && 4553 !dev->param.read_chunk_fn && 4554 dev->param.bad_block_fn && dev->param.query_block_fn) 4555 return 1; 4556 4557 /* Can use the "spare" style interface for yaffs1 */ 4558 if (!dev->param.is_yaffs2 && 4559 !dev->param.write_chunk_tags_fn && 4560 !dev->param.read_chunk_tags_fn && 4561 dev->param.write_chunk_fn && 4562 dev->param.read_chunk_fn && 4563 !dev->param.bad_block_fn && !dev->param.query_block_fn) 4564 return 1; 4565 4566 return 0; /* bad */ 4567 } 4568 4569 static int yaffs_create_initial_dir(struct yaffs_dev *dev) 4570 { 4571 /* Initialise the unlinked, deleted, root and lost+found directories */ 4572 dev->lost_n_found = dev->root_dir = NULL; 4573 dev->unlinked_dir = dev->del_dir = NULL; 4574 dev->unlinked_dir = 4575 yaffs_create_fake_dir(dev, YAFFS_OBJECTID_UNLINKED, S_IFDIR); 4576 dev->del_dir = 4577 yaffs_create_fake_dir(dev, YAFFS_OBJECTID_DELETED, S_IFDIR); 4578 dev->root_dir = 4579 yaffs_create_fake_dir(dev, YAFFS_OBJECTID_ROOT, 4580 YAFFS_ROOT_MODE | S_IFDIR); 4581 dev->lost_n_found = 4582 yaffs_create_fake_dir(dev, YAFFS_OBJECTID_LOSTNFOUND, 4583 YAFFS_LOSTNFOUND_MODE | S_IFDIR); 4584 4585 if (dev->lost_n_found && dev->root_dir && dev->unlinked_dir 4586 && dev->del_dir) { 4587 yaffs_add_obj_to_dir(dev->root_dir, dev->lost_n_found); 4588 return YAFFS_OK; 4589 } 4590 return YAFFS_FAIL; 4591 } 4592 4593 int yaffs_guts_initialise(struct yaffs_dev *dev) 4594 { 4595 int init_failed = 0; 4596 unsigned x; 4597 int bits; 4598 4599 yaffs_trace(YAFFS_TRACE_TRACING, "yaffs: yaffs_guts_initialise()"); 4600 4601 /* Check stuff that must be set */ 4602 4603 if (!dev) { 4604 yaffs_trace(YAFFS_TRACE_ALWAYS, 4605 "yaffs: Need a device" 4606 ); 4607 return YAFFS_FAIL; 4608 } 4609 4610 if (dev->is_mounted) { 4611 yaffs_trace(YAFFS_TRACE_ALWAYS, "device already mounted"); 4612 return YAFFS_FAIL; 4613 } 4614 4615 dev->internal_start_block = dev->param.start_block; 4616 dev->internal_end_block = dev->param.end_block; 4617 dev->block_offset = 0; 4618 dev->chunk_offset = 0; 4619 dev->n_free_chunks = 0; 4620 4621 dev->gc_block = 0; 4622 4623 if (dev->param.start_block == 0) { 4624 dev->internal_start_block = dev->param.start_block + 1; 4625 dev->internal_end_block = dev->param.end_block + 1; 4626 dev->block_offset = 1; 4627 dev->chunk_offset = dev->param.chunks_per_block; 4628 } 4629 4630 /* Check geometry parameters. 
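 *
 * e.g. (illustrative figures): a yaffs2 device with 2048-byte chunks,
 * 64 chunks per block and n_reserved_blocks = 5 passes the checks below
 * provided internal_end_block exceeds internal_start_block by more than
 * n_reserved_blocks + 2, i.e. by at least 8 blocks.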
*/
4631
4632 if ((!dev->param.inband_tags && dev->param.is_yaffs2 &&
4633 dev->param.total_bytes_per_chunk < 1024) ||
4634 (!dev->param.is_yaffs2 &&
4635 dev->param.total_bytes_per_chunk < 512) ||
4636 (dev->param.inband_tags && !dev->param.is_yaffs2) ||
4637 dev->param.chunks_per_block < 2 ||
4638 dev->param.n_reserved_blocks < 2 ||
4639 dev->internal_start_block <= 0 ||
4640 dev->internal_end_block <= 0 ||
4641 dev->internal_end_block <=
4642 (dev->internal_start_block + dev->param.n_reserved_blocks + 2)
4643 ) {
4644 /* otherwise it is too small */
4645 yaffs_trace(YAFFS_TRACE_ALWAYS,
4646 "NAND geometry problems: chunk size %d, type is yaffs%s, inband_tags %d ",
4647 dev->param.total_bytes_per_chunk,
4648 dev->param.is_yaffs2 ? "2" : "",
4649 dev->param.inband_tags);
4650 return YAFFS_FAIL;
4651 }
4652
4653 if (yaffs_init_nand(dev) != YAFFS_OK) {
4654 yaffs_trace(YAFFS_TRACE_ALWAYS, "InitialiseNAND failed");
4655 return YAFFS_FAIL;
4656 }
4657
4658 /* Sort out space for inband tags, if required */
4659 if (dev->param.inband_tags)
4660 dev->data_bytes_per_chunk =
4661 dev->param.total_bytes_per_chunk -
4662 sizeof(struct yaffs_packed_tags2_tags_only);
4663 else
4664 dev->data_bytes_per_chunk = dev->param.total_bytes_per_chunk;
4665
4666 /* Got the right mix of functions? */
4667 if (!yaffs_check_dev_fns(dev)) {
4668 /* Function missing */
4669 yaffs_trace(YAFFS_TRACE_ALWAYS,
4670 "device function(s) missing or wrong");
4671
4672 return YAFFS_FAIL;
4673 }
4674
4675 /* Finished with most checks. Further checks happen later on too. */
4676
4677 dev->is_mounted = 1;
4678
4679 /* OK now calculate a few things for the device */
4680
4681 /*
4682 * Calculate all the chunk size manipulation numbers:
4683 */
4684 x = dev->data_bytes_per_chunk;
4685 /* We always use dev->chunk_shift and dev->chunk_div */
4686 dev->chunk_shift = calc_shifts(x);
4687 x >>= dev->chunk_shift;
4688 dev->chunk_div = x;
4689 /* We only use chunk mask if chunk_div is 1 */
4690 dev->chunk_mask = (1 << dev->chunk_shift) - 1;
4691
4692 /*
4693 * Calculate chunk_grp_bits.
4694 * We need to find the next power of 2 greater than or equal to the total number of chunks on the device.
4695 */
4696
4697 x = dev->param.chunks_per_block * (dev->internal_end_block + 1);
4698
4699 bits = calc_shifts_ceiling(x);
4700
4701 /* Set up tnode width if wide tnodes are enabled. */
4702 if (!dev->param.wide_tnodes_disabled) {
4703 /* bits must be even so that we end up with 32-bit words */
4704 if (bits & 1)
4705 bits++;
4706 if (bits < 16)
4707 dev->tnode_width = 16;
4708 else
4709 dev->tnode_width = bits;
4710 } else {
4711 dev->tnode_width = 16;
4712 }
4713
4714 dev->tnode_mask = (1 << dev->tnode_width) - 1;
4715
4716 /* Level0 Tnodes are 16 bits or wider (if wide tnodes are enabled),
4717 * so if the bit width of the
4718 * chunk range we're using is greater than the tnode width we need
4719 * to figure out chunk_grp_bits and chunk_grp_size
4720 */
4721
4722 if (bits <= dev->tnode_width)
4723 dev->chunk_grp_bits = 0;
4724 else
4725 dev->chunk_grp_bits = bits - dev->tnode_width;
4726
4727 dev->tnode_size = (dev->tnode_width * YAFFS_NTNODES_LEVEL0) / 8;
4728 if (dev->tnode_size < sizeof(struct yaffs_tnode))
4729 dev->tnode_size = sizeof(struct yaffs_tnode);
4730
4731 dev->chunk_grp_size = 1 << dev->chunk_grp_bits;
4732
4733 if (dev->param.chunks_per_block < dev->chunk_grp_size) {
4734 /* We have a problem because the soft delete won't work if
4735 * the chunk group size > chunks per block.
4736 * This can be remedied by using larger "virtual blocks".
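 *
 * e.g. (illustrative): with wide tnodes disabled, tnode_width is 16; a
 * device spanning 2^18 chunks then gets chunk_grp_bits = 18 - 16 = 2 and
 * chunk_grp_size = 4, which only works if chunks_per_block >= 4.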
4737 */ 4738 yaffs_trace(YAFFS_TRACE_ALWAYS, "chunk group too large"); 4739 4740 return YAFFS_FAIL; 4741 } 4742 4743 /* Finished verifying the device, continue with initialisation */ 4744 4745 /* More device initialisation */ 4746 dev->all_gcs = 0; 4747 dev->passive_gc_count = 0; 4748 dev->oldest_dirty_gc_count = 0; 4749 dev->bg_gcs = 0; 4750 dev->gc_block_finder = 0; 4751 dev->buffered_block = -1; 4752 dev->doing_buffered_block_rewrite = 0; 4753 dev->n_deleted_files = 0; 4754 dev->n_bg_deletions = 0; 4755 dev->n_unlinked_files = 0; 4756 dev->n_ecc_fixed = 0; 4757 dev->n_ecc_unfixed = 0; 4758 dev->n_tags_ecc_fixed = 0; 4759 dev->n_tags_ecc_unfixed = 0; 4760 dev->n_erase_failures = 0; 4761 dev->n_erased_blocks = 0; 4762 dev->gc_disable = 0; 4763 dev->has_pending_prioritised_gc = 1; 4764 /* Assume the worst for now, will get fixed on first GC */ 4765 INIT_LIST_HEAD(&dev->dirty_dirs); 4766 dev->oldest_dirty_seq = 0; 4767 dev->oldest_dirty_block = 0; 4768 4769 /* Initialise temporary buffers and caches. */ 4770 if (!yaffs_init_tmp_buffers(dev)) 4771 init_failed = 1; 4772 4773 dev->cache = NULL; 4774 dev->gc_cleanup_list = NULL; 4775 4776 if (!init_failed && dev->param.n_caches > 0) { 4777 int i; 4778 void *buf; 4779 int cache_bytes = 4780 dev->param.n_caches * sizeof(struct yaffs_cache); 4781 4782 if (dev->param.n_caches > YAFFS_MAX_SHORT_OP_CACHES) 4783 dev->param.n_caches = YAFFS_MAX_SHORT_OP_CACHES; 4784 4785 dev->cache = kmalloc(cache_bytes, GFP_NOFS); 4786 4787 buf = (u8 *) dev->cache; 4788 4789 if (dev->cache) 4790 memset(dev->cache, 0, cache_bytes); 4791 4792 for (i = 0; i < dev->param.n_caches && buf; i++) { 4793 dev->cache[i].object = NULL; 4794 dev->cache[i].last_use = 0; 4795 dev->cache[i].dirty = 0; 4796 dev->cache[i].data = buf = 4797 kmalloc(dev->param.total_bytes_per_chunk, GFP_NOFS); 4798 } 4799 if (!buf) 4800 init_failed = 1; 4801 4802 dev->cache_last_use = 0; 4803 } 4804 4805 dev->cache_hits = 0; 4806 4807 if (!init_failed) { 4808 dev->gc_cleanup_list = 4809 kmalloc(dev->param.chunks_per_block * sizeof(u32), 4810 GFP_NOFS); 4811 if (!dev->gc_cleanup_list) 4812 init_failed = 1; 4813 } 4814 4815 if (dev->param.is_yaffs2) 4816 dev->param.use_header_file_size = 1; 4817 4818 if (!init_failed && !yaffs_init_blocks(dev)) 4819 init_failed = 1; 4820 4821 yaffs_init_tnodes_and_objs(dev); 4822 4823 if (!init_failed && !yaffs_create_initial_dir(dev)) 4824 init_failed = 1; 4825 4826 if (!init_failed && dev->param.is_yaffs2 && 4827 !dev->param.disable_summary && 4828 !yaffs_summary_init(dev)) 4829 init_failed = 1; 4830 4831 if (!init_failed) { 4832 /* Now scan the flash. */ 4833 if (dev->param.is_yaffs2) { 4834 if (yaffs2_checkpt_restore(dev)) { 4835 yaffs_check_obj_details_loaded(dev->root_dir); 4836 yaffs_trace(YAFFS_TRACE_CHECKPOINT | 4837 YAFFS_TRACE_MOUNT, 4838 "yaffs: restored from checkpoint" 4839 ); 4840 } else { 4841 4842 /* Clean up the mess caused by an aborted 4843 * checkpoint load then scan backwards. 
4844 */ 4845 yaffs_deinit_blocks(dev); 4846 4847 yaffs_deinit_tnodes_and_objs(dev); 4848 4849 dev->n_erased_blocks = 0; 4850 dev->n_free_chunks = 0; 4851 dev->alloc_block = -1; 4852 dev->alloc_page = -1; 4853 dev->n_deleted_files = 0; 4854 dev->n_unlinked_files = 0; 4855 dev->n_bg_deletions = 0; 4856 4857 if (!init_failed && !yaffs_init_blocks(dev)) 4858 init_failed = 1; 4859 4860 yaffs_init_tnodes_and_objs(dev); 4861 4862 if (!init_failed 4863 && !yaffs_create_initial_dir(dev)) 4864 init_failed = 1; 4865 4866 if (!init_failed && !yaffs2_scan_backwards(dev)) 4867 init_failed = 1; 4868 } 4869 } else if (!yaffs1_scan(dev)) { 4870 init_failed = 1; 4871 } 4872 4873 yaffs_strip_deleted_objs(dev); 4874 yaffs_fix_hanging_objs(dev); 4875 if (dev->param.empty_lost_n_found) 4876 yaffs_empty_l_n_f(dev); 4877 } 4878 4879 if (init_failed) { 4880 /* Clean up the mess */ 4881 yaffs_trace(YAFFS_TRACE_TRACING, 4882 "yaffs: yaffs_guts_initialise() aborted."); 4883 4884 yaffs_deinitialise(dev); 4885 return YAFFS_FAIL; 4886 } 4887 4888 /* Zero out stats */ 4889 dev->n_page_reads = 0; 4890 dev->n_page_writes = 0; 4891 dev->n_erasures = 0; 4892 dev->n_gc_copies = 0; 4893 dev->n_retried_writes = 0; 4894 4895 dev->n_retired_blocks = 0; 4896 4897 yaffs_verify_free_chunks(dev); 4898 yaffs_verify_blocks(dev); 4899 4900 /* Clean up any aborted checkpoint data */ 4901 if (!dev->is_checkpointed && dev->blocks_in_checkpt > 0) 4902 yaffs2_checkpt_invalidate(dev); 4903 4904 yaffs_trace(YAFFS_TRACE_TRACING, 4905 "yaffs: yaffs_guts_initialise() done."); 4906 return YAFFS_OK; 4907 } 4908 4909 void yaffs_deinitialise(struct yaffs_dev *dev) 4910 { 4911 if (dev->is_mounted) { 4912 int i; 4913 4914 yaffs_deinit_blocks(dev); 4915 yaffs_deinit_tnodes_and_objs(dev); 4916 yaffs_summary_deinit(dev); 4917 4918 if (dev->param.n_caches > 0 && dev->cache) { 4919 4920 for (i = 0; i < dev->param.n_caches; i++) { 4921 kfree(dev->cache[i].data); 4922 dev->cache[i].data = NULL; 4923 } 4924 4925 kfree(dev->cache); 4926 dev->cache = NULL; 4927 } 4928 4929 kfree(dev->gc_cleanup_list); 4930 4931 for (i = 0; i < YAFFS_N_TEMP_BUFFERS; i++) 4932 kfree(dev->temp_buffer[i].buffer); 4933 4934 dev->is_mounted = 0; 4935 4936 if (dev->param.deinitialise_flash_fn) 4937 dev->param.deinitialise_flash_fn(dev); 4938 } 4939 } 4940 4941 int yaffs_count_free_chunks(struct yaffs_dev *dev) 4942 { 4943 int n_free = 0; 4944 int b; 4945 struct yaffs_block_info *blk; 4946 4947 blk = dev->block_info; 4948 for (b = dev->internal_start_block; b <= dev->internal_end_block; b++) { 4949 switch (blk->block_state) { 4950 case YAFFS_BLOCK_STATE_EMPTY: 4951 case YAFFS_BLOCK_STATE_ALLOCATING: 4952 case YAFFS_BLOCK_STATE_COLLECTING: 4953 case YAFFS_BLOCK_STATE_FULL: 4954 n_free += 4955 (dev->param.chunks_per_block - blk->pages_in_use + 4956 blk->soft_del_pages); 4957 break; 4958 default: 4959 break; 4960 } 4961 blk++; 4962 } 4963 return n_free; 4964 } 4965 4966 int yaffs_get_n_free_chunks(struct yaffs_dev *dev) 4967 { 4968 /* This is what we report to the outside world */ 4969 int n_free; 4970 int n_dirty_caches; 4971 int blocks_for_checkpt; 4972 int i; 4973 4974 n_free = dev->n_free_chunks; 4975 n_free += dev->n_deleted_files; 4976 4977 /* Now count and subtract the number of dirty chunks in the cache. 
*/
4978
4979 for (n_dirty_caches = 0, i = 0; i < dev->param.n_caches; i++) {
4980 if (dev->cache[i].dirty)
4981 n_dirty_caches++;
4982 }
4983
4984 n_free -= n_dirty_caches;
4985
4986 n_free -=
4987 ((dev->param.n_reserved_blocks + 1) * dev->param.chunks_per_block);
4988
4989 /* Now figure checkpoint space and report that... */
4990 blocks_for_checkpt = yaffs_calc_checkpt_blocks_required(dev);
4991
4992 n_free -= (blocks_for_checkpt * dev->param.chunks_per_block);
4993
4994 if (n_free < 0)
4995 n_free = 0;
4996
4997 return n_free;
4998 }
4999
5000 /*
5001 * Marshalling functions to get loff_t file sizes into and out of
5002 * object headers.
5003 */
5004 void yaffs_oh_size_load(struct yaffs_obj_hdr *oh, loff_t fsize)
5005 {
5006 oh->file_size_low = (fsize & 0xFFFFFFFF);
5007 oh->file_size_high = ((fsize >> 32) & 0xFFFFFFFF);
5008 }
5009
5010 loff_t yaffs_oh_to_size(struct yaffs_obj_hdr *oh)
5011 {
5012 loff_t retval;
5013
5014 if (~(oh->file_size_high))
5015 retval = (((loff_t) oh->file_size_high) << 32) |
5016 (((loff_t) oh->file_size_low) & 0xFFFFFFFF);
5017 else
5018 retval = (loff_t) oh->file_size_low;
5019
5020 return retval;
5021 }
5022
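/*
 * Illustrative sketch (not part of YAFFS): shows how the object header
 * size marshalling above round-trips a 64-bit length. The function name
 * and the sample value are made up for illustration, and the block is
 * kept out of the build with #if 0.
 */
#if 0
static void example_oh_size_roundtrip(void)
{
	struct yaffs_obj_hdr oh;
	loff_t size_in = (((loff_t) 3) << 32) | 0x12345678;
	loff_t size_out;

	/* Store the 64-bit size as two 32-bit halves... */
	yaffs_oh_size_load(&oh, size_in);

	/* ...and read it back. file_size_high is 3 (not all ones), so
	 * yaffs_oh_to_size() takes the 64-bit path and size_out == size_in.
	 */
	size_out = yaffs_oh_to_size(&oh);
	(void) size_out;
}
#endif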