/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include "nodelist.h"

static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);

/*
 * Issue a flash erase for a single eraseblock.
 *
 * On success the block is handed to jffs2_erase_succeeded(), which queues
 * it on erase_complete_list.  Transient failures (kmalloc failure here, or
 * -ENOMEM/-EAGAIN from the erase itself) refile the block on
 * erase_pending_list so the erase is retried later; any other failure is
 * passed to jffs2_erase_failed() along with the failing offset reported
 * by the driver.
 *
 * Takes erase_free_sem then erase_completion_lock (in that order) for the
 * refile paths; the caller must hold neither.
 */
static void jffs2_erase_block(struct jffs2_sb_info *c,
			      struct jffs2_eraseblock *jeb)
{
	int ret;
	uint32_t bad_offset;
#ifdef __ECOS
	ret = jffs2_flash_erase(c, jeb);
	if (!ret) {
		jffs2_erase_succeeded(c, jeb);
		return;
	}
	/* No per-page failure address available here; blame the block start */
	bad_offset = jeb->offset;
#else /* Linux */
	struct erase_info *instr;

	jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n",
		  __func__,
		  jeb->offset, jeb->offset, jeb->offset + c->sector_size);
	instr = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
	if (!instr) {
		pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
		mutex_lock(&c->erase_free_sem);
		spin_lock(&c->erase_completion_lock);
		list_move(&jeb->list, &c->erase_pending_list);
		/* Undo the 'erasing' accounting: until the retry actually
		   erases it, this block's space counts as dirty again */
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;
		spin_unlock(&c->erase_completion_lock);
		mutex_unlock(&c->erase_free_sem);
		return;
	}

	memset(instr, 0, sizeof(*instr));

	instr->mtd = c->mtd;
	instr->addr = jeb->offset;
	instr->len = c->sector_size;

	ret = mtd_erase(c->mtd, instr);
	if (!ret) {
		jffs2_erase_succeeded(c, jeb);
		kfree(instr);
		return;
	}

	bad_offset = instr->fail_addr;
	kfree(instr);
#endif /* __ECOS */

	if (ret == -ENOMEM || ret == -EAGAIN) {
		/* Erase failed immediately. Refile it on the list */
		jffs2_dbg(1, "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n",
			  jeb->offset, ret);
		mutex_lock(&c->erase_free_sem);
		spin_lock(&c->erase_completion_lock);
		list_move(&jeb->list, &c->erase_pending_list);
		/* Same accounting reversal as the kmalloc-failure path above */
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;
		spin_unlock(&c->erase_completion_lock);
		mutex_unlock(&c->erase_free_sem);
		return;
	}

	if (ret == -EROFS)
		pr_warn("Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n",
			jeb->offset);
	else
		pr_warn("Erase at 0x%08x failed immediately: errno %d\n",
			jeb->offset, ret);

	jffs2_erase_failed(c, jeb, bad_offset);
}

/*
 * Service the erase work lists.
 *
 * Blocks on erase_complete_list are verified and marked clean via
 * jffs2_mark_erased_block(); blocks on erase_pending_list have their node
 * refs freed, their accounting zeroed, and an erase started via
 * jffs2_erase_block().  Only completed blocks count against @count:
 * processing stops early once --count reaches zero.  Returns the number
 * of completed blocks that were marked.
 *
 * erase_free_sem and erase_completion_lock are held while the lists are
 * manipulated, and dropped around the actual flash work.
 */
int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
{
	struct jffs2_eraseblock *jeb;
	int work_done = 0;

	mutex_lock(&c->erase_free_sem);

	spin_lock(&c->erase_completion_lock);

	while (!list_empty(&c->erase_complete_list) ||
	       !list_empty(&c->erase_pending_list)) {

		if (!list_empty(&c->erase_complete_list)) {
			jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
			/* Park it on erase_checking_list while we work on it unlocked */
			list_move(&jeb->list, &c->erase_checking_list);
			spin_unlock(&c->erase_completion_lock);
			mutex_unlock(&c->erase_free_sem);
			jffs2_mark_erased_block(c, jeb);

			work_done++;
			if (!--count) {
				jffs2_dbg(1, "Count reached. jffs2_erase_pending_blocks leaving\n");
				/* Locks were already dropped above, so we can
				   leave directly without unlocking */
				goto done;
			}

		} else if (!list_empty(&c->erase_pending_list)) {
			jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
			jffs2_dbg(1, "Starting erase of pending block 0x%08x\n",
				  jeb->offset);
			list_del(&jeb->list);
			/* The whole sector becomes 'erasing'; retire its old
			   per-category accounting and drop its node refs */
			c->erasing_size += c->sector_size;
			c->wasted_size -= jeb->wasted_size;
			c->free_size -= jeb->free_size;
			c->used_size -= jeb->used_size;
			c->dirty_size -= jeb->dirty_size;
			jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0;
			jffs2_free_jeb_node_refs(c, jeb);
			list_add(&jeb->list, &c->erasing_list);
			spin_unlock(&c->erase_completion_lock);
			mutex_unlock(&c->erase_free_sem);

			jffs2_erase_block(c, jeb);

		} else {
			BUG();
		}

		/* Be nice */
		cond_resched();
		mutex_lock(&c->erase_free_sem);
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->erase_free_sem);
 done:
	jffs2_dbg(1, "jffs2_erase_pending_blocks completed\n");
	return work_done;
}

/* Erase finished cleanly: queue the block for verification/marking and
   poke the GC thread, which calls jffs2_erase_pending_blocks(). */
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	jffs2_dbg(1, "Erase completed successfully at 0x%08x\n", jeb->offset);
	mutex_lock(&c->erase_free_sem);
	spin_lock(&c->erase_completion_lock);
	list_move_tail(&jeb->list, &c->erase_complete_list);
	/* Wake the GC thread to mark them clean */
	jffs2_garbage_collect_trigger(c);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->erase_free_sem);
	wake_up(&c->erase_wait);
}

/*
 * Handle an erase that failed at @bad_offset.
 *
 * For NAND (cleanmarker kept in OOB) with a known failure address, let
 * jffs2_write_nand_badblock() decide: if it returns zero the block gets
 * another try on erase_pending_list; otherwise (or for non-NAND / unknown
 * address) the block is retired to bad_list and its space accounted bad.
 */
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
	/* For NAND, if the failure did not occur at the device level for a
	   specific physical page, don't bother updating the bad block table. */
	if (jffs2_cleanmarker_oob(c) && (bad_offset != (uint32_t)MTD_FAIL_ADDR_UNKNOWN)) {
		/* We had a device-level failure to erase.  Let's see if we've
		   failed too many times. */
		if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
			/* We'd like to give this block another try. */
			mutex_lock(&c->erase_free_sem);
			spin_lock(&c->erase_completion_lock);
			list_move(&jeb->list, &c->erase_pending_list);
			c->erasing_size -= c->sector_size;
			c->dirty_size += c->sector_size;
			jeb->dirty_size = c->sector_size;
			spin_unlock(&c->erase_completion_lock);
			mutex_unlock(&c->erase_free_sem);
			return;
		}
	}

	mutex_lock(&c->erase_free_sem);
	spin_lock(&c->erase_completion_lock);
	c->erasing_size -= c->sector_size;
	c->bad_size += c->sector_size;
	list_move(&jeb->list, &c->bad_list);
	c->nr_erasing_blocks--;
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->erase_free_sem);
	wake_up(&c->erase_wait);
}

/* Hmmm. Maybe we should accept the extra space it takes and make
   this a standard doubly-linked list?
 */
/*
 * Unlink every raw node ref belonging to @jeb from the per-inode chain
 * that @ref sits on.
 *
 * The next_in_ino chain ends at the owning jffs2_inode_cache (or xattr
 * datum/ref, which share the layout): the element whose next_in_ino is
 * NULL *is* that cache object, and the walk restarts from ic->nodes so
 * the whole chain is covered regardless of where @ref lies on it.  The
 * walk stops once @ref itself has been unlinked.  If the cache object is
 * left with no nodes, it is released/deleted according to its class.
 */
static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
							struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb)
{
	struct jffs2_inode_cache *ic = NULL;
	struct jffs2_raw_node_ref **prev;

	prev = &ref->next_in_ino;

	/* Walk the inode's list once, removing any nodes from this eraseblock */
	while (1) {
		if (!(*prev)->next_in_ino) {
			/* We're looking at the jffs2_inode_cache, which is
			   at the end of the linked list. Stash it and continue
			   from the beginning of the list */
			ic = (struct jffs2_inode_cache *)(*prev);
			prev = &ic->nodes;
			continue;
		}

		if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) {
			/* It's in the block we're erasing */
			struct jffs2_raw_node_ref *this;

			this = *prev;
			*prev = this->next_in_ino;
			this->next_in_ino = NULL;

			if (this == ref)
				break;

			continue;
		}
		/* Not to be deleted. Skip */
		prev = &((*prev)->next_in_ino);
	}

	/* PARANOIA */
	if (!ic) {
		JFFS2_WARNING("inode_cache/xattr_datum/xattr_ref"
			      " not found in remove_node_refs()!!\n");
		return;
	}

	jffs2_dbg(1, "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
		  jeb->offset, jeb->offset + c->sector_size, ic->ino);

	D2({
		int i=0;
		struct jffs2_raw_node_ref *this;
		printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n");

		this = ic->nodes;

		printk(KERN_DEBUG);
		while(this) {
			pr_cont("0x%08x(%d)->",
				ref_offset(this), ref_flags(this));
			if (++i == 5) {
				printk(KERN_DEBUG);
				i=0;
			}
			this = this->next_in_ino;
		}
		pr_cont("\n");
	});

	switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
	case RAWNODE_CLASS_XATTR_DATUM:
		jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
		break;
	case RAWNODE_CLASS_XATTR_REF:
		jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
		break;
#endif
	default:
		/* ic->nodes == (void *)ic means the chain is empty (it points
		   back at the cache itself); only then drop an unlinked inode */
		if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
			jffs2_del_ino_cache(c, ic);
	}
}

/*
 * Free all raw node refs for @jeb before it is erased.
 *
 * Refs live in arrays ('refblocks') chained by a sentinel entry whose
 * flash_offset is REF_LINK_NODE and whose next_in_ino points at the next
 * refblock; within a block we simply advance with ref++.  Each real,
 * inode-owned ref (non-REF_EMPTY_NODE, next_in_ino set) is unlinked from
 * its inode chain; each exhausted refblock is freed.
 */
void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	struct jffs2_raw_node_ref *block, *ref;
	jffs2_dbg(1, "Freeing all node refs for eraseblock offset 0x%08x\n",
		  jeb->offset);

	block = ref = jeb->first_node;

	while (ref) {
		if (ref->flash_offset == REF_LINK_NODE) {
			/* End of this refblock: hop to the next one and free
			   the one we just finished walking */
			ref = ref->next_in_ino;
			jffs2_free_refblock(block);
			block = ref;
			continue;
		}
		if (ref->flash_offset != REF_EMPTY_NODE && ref->next_in_ino)
			jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
		/* else it was a non-inode node or already removed, so don't bother */

		ref++;
	}
	jeb->first_node = jeb->last_node = NULL;
}

/*
 * Verify that a freshly-erased block reads back as all 0xFF.
 *
 * Fast path: mtd_point() the whole sector and scan it word by word.
 * Fallback (point unsupported, failed, or short): read the sector a
 * PAGE_SIZE chunk at a time with mtd_read() and scan each chunk.
 *
 * Returns 0 if the block is clean, -EIO if a non-FF word was found
 * (*bad_offset is set to its offset on the read path), -EAGAIN if the
 * verify buffer could not be allocated (caller should refile and retry).
 */
static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset)
{
	void *ebuf;
	uint32_t ofs;
	size_t retlen;
	int ret;
	unsigned long *wordebuf;

	ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen,
			&ebuf, NULL);
	if (ret != -EOPNOTSUPP) {
		if (ret) {
			jffs2_dbg(1, "MTD point failed %d\n", ret);
			goto do_flash_read;
		}
		if (retlen < c->sector_size) {
			/* Don't muck about if it won't let us point to the whole erase sector */
			jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
				  retlen);
			mtd_unpoint(c->mtd, jeb->offset, retlen);
			goto do_flash_read;
		}
		/* Start one word *before* the buffer so the pre-increment
		   in the loop below begins scanning at ebuf itself */
		wordebuf = ebuf-sizeof(*wordebuf);
		retlen /= sizeof(*wordebuf);
		do {
		   if (*++wordebuf != ~0)
			   break;
		} while(--retlen);
		mtd_unpoint(c->mtd, jeb->offset, c->sector_size);
		if (retlen) {
			pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
				*wordebuf,
				jeb->offset +
				c->sector_size-retlen * sizeof(*wordebuf));
			return -EIO;
		}
		return 0;
	}
 do_flash_read:
	ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ebuf) {
		pr_warn("Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n",
			jeb->offset);
		return -EAGAIN;
	}

	jffs2_dbg(1, "Verifying erase at 0x%08x\n", jeb->offset);

	for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {
		uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
		int i;

		*bad_offset = ofs;

		ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf);
		if (ret) {
			pr_warn("Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n",
				ofs, ret);
			ret = -EIO;
			goto fail;
		}
		if (retlen != readlen) {
			pr_warn("Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n",
				ofs, readlen, retlen);
			ret = -EIO;
			goto fail;
		}
		for (i=0; i<readlen; i += sizeof(unsigned long)) {
			/* It's OK. We know it's properly aligned */
			unsigned long *datum = ebuf + i;
			/* *datum + 1 is non-zero unless *datum is all-ones (erased) */
			if (*datum + 1) {
				*bad_offset += i;
				pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08x\n",
					*datum, *bad_offset);
				ret = -EIO;
				goto fail;
			}
		}
		ofs += readlen;
		cond_resched();
	}
	ret = 0;
 fail:
	kfree(ebuf);
	return ret;
}

/*
 * Verify an erased block and bring it back into service.
 *
 * Runs jffs2_block_check_erase(); on -EAGAIN the block is requeued on
 * erase_complete_list for a later retry, on -EIO it is handed to
 * jffs2_erase_failed().  Otherwise a cleanmarker is written (in OOB for
 * NAND, as an in-band node unless cleanmarker_size is 0), the accounting
 * is switched from 'erasing' to 'free', and the block joins free_list.
 */
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	size_t retlen;
	int ret;
	uint32_t uninitialized_var(bad_offset);

	switch (jffs2_block_check_erase(c, jeb, &bad_offset)) {
	case -EAGAIN:	goto refile;
	case -EIO:	goto filebad;
	}

	/* Write the erase complete marker */
	jffs2_dbg(1, "Writing erased marker to block at 0x%08x\n", jeb->offset);
	bad_offset = jeb->offset;

	/* Cleanmarker in oob area or no cleanmarker at all ? */
	if (jffs2_cleanmarker_oob(c) || c->cleanmarker_size == 0) {

		if (jffs2_cleanmarker_oob(c)) {
			if (jffs2_write_nand_cleanmarker(c, jeb))
				goto filebad;
		}
	} else {

		struct kvec vecs[1];
		struct jffs2_unknown_node marker = {
			.magic =	cpu_to_je16(JFFS2_MAGIC_BITMASK),
			.nodetype =	cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
			.totlen =	cpu_to_je32(c->cleanmarker_size)
		};

		/* NOTE(review): return value ignored, as in the original;
		   presumably the later jffs2_link_node_ref() relies on this
		   prealloc having succeeded — confirm */
		jffs2_prealloc_raw_node_refs(c, jeb, 1);

		/* CRC covers the header minus the hdr_crc field itself */
		marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));

		vecs[0].iov_base = (unsigned char *) &marker;
		vecs[0].iov_len = sizeof(marker);
		ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);

		if (ret || retlen != sizeof(marker)) {
			if (ret)
				pr_warn("Write clean marker to block at 0x%08x failed: %d\n",
					jeb->offset, ret);
			else
				pr_warn("Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
					jeb->offset, sizeof(marker), retlen);

			goto filebad;
		}
	}
	/* Everything else got zeroed before the erase */
	jeb->free_size = c->sector_size;

	mutex_lock(&c->erase_free_sem);
	spin_lock(&c->erase_completion_lock);

	c->erasing_size -= c->sector_size;
	c->free_size += c->sector_size;

	/* Account for cleanmarker now, if it's in-band */
	if (c->cleanmarker_size && !jffs2_cleanmarker_oob(c))
		jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL);

	list_move_tail(&jeb->list, &c->free_list);
	c->nr_erasing_blocks--;
	c->nr_free_blocks++;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->erase_free_sem);
	wake_up(&c->erase_wait);
	return;

 filebad:
	jffs2_erase_failed(c, jeb, bad_offset);
	return;

 refile:
	/* Stick it back on the list from whence it came and come back later */
	mutex_lock(&c->erase_free_sem);
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	list_move(&jeb->list, &c->erase_complete_list);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->erase_free_sem);
	return;
}