/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 * Copyright (C) 2006, 2007 University of Szeged, Hungary
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 *          Zoltan Sogor
 */

/*
 * This file implements UBIFS I/O subsystem which provides various I/O-related
 * helper functions (reading/writing/checking/validating nodes) and implements
 * write-buffering support. Write buffers help to save space which otherwise
 * would have been wasted for padding to the nearest minimal I/O unit boundary.
 * Instead, data first goes to the write-buffer and is flushed when the
 * buffer is full or when it is not used for some time (by timer). This is
 * similar to the mechanism used by JFFS2.
 *
 * Write-buffers are defined by 'struct ubifs_wbuf' objects and protected by
 * mutexes defined inside these objects. Since sometimes upper-level code
 * has to lock the write-buffer (e.g. journal space reservation code), many
 * functions related to write-buffers have a "nolock" suffix which means that
 * the caller has to lock the write-buffer before calling this function.
 *
 * UBIFS stores nodes at 64 bit-aligned addresses. If the node length is not
 * aligned, UBIFS starts the next node from the aligned address, and the padded
 * bytes may contain any rubbish. In other words, UBIFS does not put padding
 * bytes in those small gaps. Common headers of nodes store real node lengths,
 * not aligned lengths. Indexing nodes also store real lengths in branches.
 *
 * UBIFS uses padding when it pads to the next min. I/O unit. In this case it
 * uses padding nodes or padding bytes, if the padding node does not fit.
 *
 * All UBIFS nodes are protected by CRC checksums and UBIFS checks all nodes
 * every time they are read from the flash media.
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include "ubifs.h"

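/*
 * Illustrative sketch of the "nolock" calling convention described above
 * (the caller names are assumptions, not taken from this file): a typical
 * user of a write-buffer brackets the "nolock" helpers with the wbuf's own
 * mutex, e.g.:
 *
 *	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
 *	err = ubifs_wbuf_write_nolock(wbuf, node, len);
 *	mutex_unlock(&wbuf->io_mutex);
 *
 * ubifs_bg_wbufs_sync() and ubifs_sync_wbufs_by_inode() below follow this
 * pattern, and the journal code outside this file is expected to do the same.
 */
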
/**
 * ubifs_ro_mode - switch UBIFS to read-only mode.
 * @c: UBIFS file-system description object
 * @err: error code which is the reason of switching to R/O mode
 */
void ubifs_ro_mode(struct ubifs_info *c, int err)
{
	if (!c->ro_error) {
		c->ro_error = 1;
		c->no_chk_data_crc = 0;
		c->vfs_sb->s_flags |= MS_RDONLY;
		ubifs_warn("switched to read-only mode, error %d", err);
		dbg_dump_stack();
	}
}

/**
 * ubifs_check_node - check node.
 * @c: UBIFS file-system description object
 * @buf: node to check
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @quiet: print no messages
 * @must_chk_crc: indicates whether to always check the CRC
 *
 * This function checks node magic number and CRC checksum. This function also
 * validates node length to prevent UBIFS from becoming crazy when an attacker
 * feeds it a file-system image with incorrect nodes. For example, too large
 * node length in the common header could cause UBIFS to read memory outside of
 * allocated buffer when checking the CRC checksum.
 *
 * This function may skip data nodes CRC checking if @c->no_chk_data_crc is
 * true, which is controlled by corresponding UBIFS mount option. However, if
 * @must_chk_crc is true, then @c->no_chk_data_crc is ignored and CRC is
 * checked. Similarly, if @c->always_chk_crc is true, @c->no_chk_data_crc is
 * ignored and CRC is checked.
 *
 * This function returns zero in case of success and %-EUCLEAN in case of bad
 * CRC or magic.
 */
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
		     int offs, int quiet, int must_chk_crc)
{
	int err = -EINVAL, type, node_len;
	uint32_t crc, node_crc, magic;
	const struct ubifs_ch *ch = buf;

	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);

	magic = le32_to_cpu(ch->magic);
	if (magic != UBIFS_NODE_MAGIC) {
		if (!quiet)
			ubifs_err("bad magic %#08x, expected %#08x",
				  magic, UBIFS_NODE_MAGIC);
		err = -EUCLEAN;
		goto out;
	}

	type = ch->node_type;
	if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
		if (!quiet)
			ubifs_err("bad node type %d", type);
		goto out;
	}

	node_len = le32_to_cpu(ch->len);
	if (node_len + offs > c->leb_size)
		goto out_len;

	if (c->ranges[type].max_len == 0) {
		if (node_len != c->ranges[type].len)
			goto out_len;
	} else if (node_len < c->ranges[type].min_len ||
		   node_len > c->ranges[type].max_len)
		goto out_len;

	if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->always_chk_crc &&
	    c->no_chk_data_crc)
		return 0;

	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
	node_crc = le32_to_cpu(ch->crc);
	if (crc != node_crc) {
		if (!quiet)
			ubifs_err("bad CRC: calculated %#08x, read %#08x",
				  crc, node_crc);
		err = -EUCLEAN;
		goto out;
	}

	return 0;

out_len:
	if (!quiet)
		ubifs_err("bad node length %d", node_len);
out:
	if (!quiet) {
		ubifs_err("bad node at LEB %d:%d", lnum, offs);
		dbg_dump_node(c, buf);
		dbg_dump_stack();
	}
	return err;
}

/**
 * ubifs_pad - pad flash space.
 * @c: UBIFS file-system description object
 * @buf: buffer to put padding to
 * @pad: how many bytes to pad
 *
 * The flash media obliges us to write only in chunks of @c->min_io_size and
 * when we have to write less data we add a padding node to the write-buffer
 * and pad it to the next minimal I/O unit's boundary. Padding nodes help when
 * the media is being scanned. If the amount of wasted space is not enough to
 * fit a padding node which takes %UBIFS_PAD_NODE_SZ bytes, we write padding
 * bytes pattern (%UBIFS_PADDING_BYTE).
 *
 * Padding nodes are also used to fill gaps when the "commit-in-gaps" method is
 * used.
 */
void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
{
	uint32_t crc;

	ubifs_assert(pad >= 0 && !(pad & 7));

	if (pad >= UBIFS_PAD_NODE_SZ) {
		struct ubifs_ch *ch = buf;
		struct ubifs_pad_node *pad_node = buf;

		ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
		ch->node_type = UBIFS_PAD_NODE;
		ch->group_type = UBIFS_NO_NODE_GROUP;
		ch->padding[0] = ch->padding[1] = 0;
		ch->sqnum = 0;
		ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ);
		pad -= UBIFS_PAD_NODE_SZ;
		pad_node->pad_len = cpu_to_le32(pad);
		crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8);
		ch->crc = cpu_to_le32(crc);
		memset(buf + UBIFS_PAD_NODE_SZ, 0, pad);
	} else if (pad > 0)
		/* Too little space, padding node won't fit */
		memset(buf, UBIFS_PADDING_BYTE, pad);
}

/**
 * next_sqnum - get next sequence number.
 * @c: UBIFS file-system description object
 */
static unsigned long long next_sqnum(struct ubifs_info *c)
{
	unsigned long long sqnum;

	spin_lock(&c->cnt_lock);
	sqnum = ++c->max_sqnum;
	spin_unlock(&c->cnt_lock);

	if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
		if (sqnum >= SQNUM_WATERMARK) {
			ubifs_err("sequence number overflow %llu, end of life",
				  sqnum);
			ubifs_ro_mode(c, -EINVAL);
		}
		ubifs_warn("running out of sequence numbers, end of life soon");
	}

	return sqnum;
}

/**
 * ubifs_prepare_node - prepare node to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @pad: if the buffer has to be padded
 *
 * This function prepares node at @node to be written to the media - it
 * calculates node CRC, fills the common header, and adds proper padding up to
 * the next minimum I/O unit if @pad is not zero.
 */
void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
{
	uint32_t crc;
	struct ubifs_ch *ch = node;
	unsigned long long sqnum = next_sqnum(c);

	ubifs_assert(len >= UBIFS_CH_SZ);

	ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
	ch->len = cpu_to_le32(len);
	ch->group_type = UBIFS_NO_NODE_GROUP;
	ch->sqnum = cpu_to_le64(sqnum);
	ch->padding[0] = ch->padding[1] = 0;
	crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
	ch->crc = cpu_to_le32(crc);

	if (pad) {
		len = ALIGN(len, 8);
		pad = ALIGN(len, c->min_io_size) - len;
		ubifs_pad(c, node + len, pad);
	}
}

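/*
 * Worked example with assumed, purely illustrative numbers: if
 * @c->min_io_size is 2048 and a node is 161 bytes long, ubifs_prepare_node()
 * with @pad set first aligns the length to 168 bytes (8-byte boundary) and
 * then asks ubifs_pad() for 2048 - 168 = 1880 bytes of padding. Since 1880 is
 * not smaller than %UBIFS_PAD_NODE_SZ, a padding node is written followed by
 * zeroed pad bytes; had the gap been smaller than a padding node, it would
 * have been filled with %UBIFS_PADDING_BYTE instead.
 */
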
/**
 * ubifs_prep_grp_node - prepare node of a group to be written to flash.
 * @c: UBIFS file-system description object
 * @node: the node to pad
 * @len: node length
 * @last: indicates the last node of the group
 *
 * This function prepares node at @node to be written to the media - it
 * calculates node CRC and fills the common header.
 */
void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
{
	uint32_t crc;
	struct ubifs_ch *ch = node;
	unsigned long long sqnum = next_sqnum(c);

	ubifs_assert(len >= UBIFS_CH_SZ);

	ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
	ch->len = cpu_to_le32(len);
	if (last)
		ch->group_type = UBIFS_LAST_OF_NODE_GROUP;
	else
		ch->group_type = UBIFS_IN_NODE_GROUP;
	ch->sqnum = cpu_to_le64(sqnum);
	ch->padding[0] = ch->padding[1] = 0;
	crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
	ch->crc = cpu_to_le32(crc);
}

/**
 * wbuf_timer_callback_nolock - write-buffer timer callback function.
 * @timer: timer data (write-buffer descriptor)
 *
 * This function is called when the write-buffer timer expires.
 */
static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
{
	struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);

	dbg_io("jhead %s", dbg_jhead(wbuf->jhead));
	wbuf->need_sync = 1;
	wbuf->c->need_wbuf_sync = 1;
	ubifs_wake_up_bgt(wbuf->c);
	return HRTIMER_NORESTART;
}

/**
 * new_wbuf_timer_nolock - start new write-buffer timer.
 * @wbuf: write-buffer descriptor
 */
static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
	ubifs_assert(!hrtimer_active(&wbuf->timer));

	if (wbuf->no_timer)
		return;
	dbg_io("set timer for jhead %s, %llu-%llu millisecs",
	       dbg_jhead(wbuf->jhead),
	       div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC),
	       div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta,
		       USEC_PER_SEC));
	hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta,
			       HRTIMER_MODE_REL);
}

/**
 * cancel_wbuf_timer_nolock - cancel write-buffer timer.
 * @wbuf: write-buffer descriptor
 */
static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
{
	if (wbuf->no_timer)
		return;
	wbuf->need_sync = 0;
	hrtimer_cancel(&wbuf->timer);
}

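/*
 * Descriptive summary of the timer life-cycle implemented in this file: a
 * write which leaves data sitting in the buffer arms the hrtimer via
 * new_wbuf_timer_nolock(). When the timer fires, wbuf_timer_callback_nolock()
 * only marks the buffer as needing synchronization and wakes the background
 * thread; the actual flush happens later in ubifs_bg_wbufs_sync(), which
 * takes the wbuf mutex and calls ubifs_wbuf_sync_nolock(). Synchronizing or
 * re-writing the buffer cancels any pending timer first.
 */
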
/**
 * ubifs_wbuf_sync_nolock - synchronize write-buffer.
 * @wbuf: write-buffer to synchronize
 *
 * This function synchronizes write-buffer @wbuf and returns zero in case of
 * success or a negative error code in case of failure.
 */
int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
{
	struct ubifs_info *c = wbuf->c;
	int err, dirt;

	cancel_wbuf_timer_nolock(wbuf);
	if (!wbuf->used || wbuf->lnum == -1)
		/* Write-buffer is empty or not seeked */
		return 0;

	dbg_io("LEB %d:%d, %d bytes, jhead %s",
	       wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
	ubifs_assert(!(wbuf->avail & 7));
	ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size);
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (c->ro_error)
		return -EROFS;

	ubifs_pad(c, wbuf->buf + wbuf->used, wbuf->avail);
	err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
			    c->min_io_size, wbuf->dtype);
	if (err) {
		ubifs_err("cannot write %d bytes to LEB %d:%d",
			  c->min_io_size, wbuf->lnum, wbuf->offs);
		dbg_dump_stack();
		return err;
	}

	dirt = wbuf->avail;

	spin_lock(&wbuf->lock);
	wbuf->offs += c->min_io_size;
	wbuf->avail = c->min_io_size;
	wbuf->used = 0;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

	if (wbuf->sync_callback)
		err = wbuf->sync_callback(c, wbuf->lnum,
					  c->leb_size - wbuf->offs, dirt);
	return err;
}

/**
 * ubifs_wbuf_seek_nolock - seek write-buffer.
 * @wbuf: write-buffer
 * @lnum: logical eraseblock number to seek to
 * @offs: logical eraseblock offset to seek to
 * @dtype: data type
 *
 * This function targets the write-buffer to logical eraseblock @lnum:@offs.
 * The write-buffer is synchronized if it is not empty. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
			   int dtype)
{
	const struct ubifs_info *c = wbuf->c;

	dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt);
	ubifs_assert(offs >= 0 && offs <= c->leb_size);
	ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
	ubifs_assert(lnum != wbuf->lnum);

	if (wbuf->used > 0) {
		int err = ubifs_wbuf_sync_nolock(wbuf);

		if (err)
			return err;
	}

	spin_lock(&wbuf->lock);
	wbuf->lnum = lnum;
	wbuf->offs = offs;
	wbuf->avail = c->min_io_size;
	wbuf->used = 0;
	spin_unlock(&wbuf->lock);
	wbuf->dtype = dtype;

	return 0;
}

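/*
 * Illustrative usage sketch (the caller and variable names are assumptions,
 * not taken from this file): code that owns a journal head would typically
 * point its write-buffer at a fresh LEB and then stream nodes through it,
 * along the lines of:
 *
 *	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
 *	err = ubifs_wbuf_seek_nolock(wbuf, lnum, 0, wbuf->dtype);
 *	if (!err)
 *		err = ubifs_wbuf_write_nolock(wbuf, node, len);
 *	mutex_unlock(&wbuf->io_mutex);
 *
 * Seeking synchronizes any unwritten data first, so the previous LEB's tail
 * reaches the media before the buffer is re-targeted.
 */
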
/**
 * ubifs_bg_wbufs_sync - synchronize write-buffers.
 * @c: UBIFS file-system description object
 *
 * This function is called by background thread to synchronize write-buffers.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_bg_wbufs_sync(struct ubifs_info *c)
{
	int err, i;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (!c->need_wbuf_sync)
		return 0;
	c->need_wbuf_sync = 0;

	if (c->ro_error) {
		err = -EROFS;
		goto out_timers;
	}

	dbg_io("synchronize");
	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		cond_resched();

		/*
		 * If the mutex is locked then wbuf is being changed, so
		 * synchronization is not necessary.
		 */
		if (mutex_is_locked(&wbuf->io_mutex))
			continue;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		if (!wbuf->need_sync) {
			mutex_unlock(&wbuf->io_mutex);
			continue;
		}

		err = ubifs_wbuf_sync_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);
		if (err) {
			ubifs_err("cannot sync write-buffer, error %d", err);
			ubifs_ro_mode(c, err);
			goto out_timers;
		}
	}

	return 0;

out_timers:
	/* Cancel all timers to prevent repeated errors */
	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		cancel_wbuf_timer_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);
	}
	return err;
}

/**
 * ubifs_wbuf_write_nolock - write data to flash via write-buffer.
 * @wbuf: write-buffer
 * @buf: node to write
 * @len: node length
 *
 * This function writes data to flash via write-buffer @wbuf. This means that
 * the last piece of the node won't reach the flash media immediately if it
 * does not take a whole minimal I/O unit. Instead, the node will sit in RAM
 * until the write-buffer is synchronized (e.g., by timer).
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure. If the node cannot be written because there is no more
 * space in this logical eraseblock, %-ENOSPC is returned.
 */
int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
{
	struct ubifs_info *c = wbuf->c;
	int err, written, n, aligned_len = ALIGN(len, 8), offs;

	dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
	       dbg_ntype(((struct ubifs_ch *)buf)->node_type),
	       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
	ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
	ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
	ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
	ubifs_assert(wbuf->avail > 0 && wbuf->avail <= c->min_io_size);
	ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
		err = -ENOSPC;
		goto out;
	}

	cancel_wbuf_timer_nolock(wbuf);

	if (c->ro_error)
		return -EROFS;

	if (aligned_len <= wbuf->avail) {
		/*
		 * The node is not very large and fits entirely within
		 * write-buffer.
		 */
		memcpy(wbuf->buf + wbuf->used, buf, len);

		if (aligned_len == wbuf->avail) {
			dbg_io("flush jhead %s wbuf to LEB %d:%d",
			       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
			err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf,
					    wbuf->offs, c->min_io_size,
					    wbuf->dtype);
			if (err)
				goto out;

			spin_lock(&wbuf->lock);
			wbuf->offs += c->min_io_size;
			wbuf->avail = c->min_io_size;
			wbuf->used = 0;
			wbuf->next_ino = 0;
			spin_unlock(&wbuf->lock);
		} else {
			spin_lock(&wbuf->lock);
			wbuf->avail -= aligned_len;
			wbuf->used += aligned_len;
			spin_unlock(&wbuf->lock);
		}

		goto exit;
	}

	/*
	 * The node is large enough and does not fit entirely within current
	 * minimal I/O unit. We have to fill and flush write-buffer and switch
	 * to the next min. I/O unit.
	 */
	dbg_io("flush jhead %s wbuf to LEB %d:%d",
	       dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
	memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
	err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
			    c->min_io_size, wbuf->dtype);
	if (err)
		goto out;

	offs = wbuf->offs + c->min_io_size;
	len -= wbuf->avail;
	aligned_len -= wbuf->avail;
	written = wbuf->avail;

	/*
	 * The remaining data may take more whole min. I/O units, so write the
	 * part which is a multiple of the min. I/O unit size directly to the
	 * flash media. We align node length to 8-byte boundary because we
	 * anyway flush the write-buffer if the remaining space is less than 8
	 * bytes.
	 */
	n = aligned_len >> c->min_io_shift;
	if (n) {
		n <<= c->min_io_shift;
		dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum, offs);
		err = ubi_leb_write(c->ubi, wbuf->lnum, buf + written, offs, n,
				    wbuf->dtype);
		if (err)
			goto out;
		offs += n;
		aligned_len -= n;
		len -= n;
		written += n;
	}

	spin_lock(&wbuf->lock);
	if (aligned_len)
		/*
		 * And now we have what's left and what does not take whole
		 * min. I/O unit, so write it to the write-buffer and we are
		 * done.
		 */
		memcpy(wbuf->buf, buf + written, len);

	wbuf->offs = offs;
	wbuf->used = aligned_len;
	wbuf->avail = c->min_io_size - aligned_len;
	wbuf->next_ino = 0;
	spin_unlock(&wbuf->lock);

exit:
	if (wbuf->sync_callback) {
		int free = c->leb_size - wbuf->offs - wbuf->used;

		err = wbuf->sync_callback(c, wbuf->lnum, free, 0);
		if (err)
			goto out;
	}

	if (wbuf->used)
		new_wbuf_timer_nolock(wbuf);

	return 0;

out:
	ubifs_err("cannot write %d bytes to LEB %d:%d, error %d",
		  len, wbuf->lnum, wbuf->offs, err);
	dbg_dump_node(c, buf);
	dbg_dump_stack();
	dbg_dump_leb(c, wbuf->lnum);
	return err;
}

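/*
 * Worked example for ubifs_wbuf_write_nolock() with assumed, illustrative
 * numbers: suppose @c->min_io_size is 512 and the buffer already holds 200
 * bytes, so wbuf->avail is 312. Writing a 1000-byte node (already 8-byte
 * aligned) does not fit, so the first 312 bytes complete the current min. I/O
 * unit, which is flushed with ubi_leb_write(). Of the remaining 688 bytes,
 * one whole 512-byte unit is written straight to the media, and the last 176
 * bytes stay in the write-buffer (wbuf->used = 176, wbuf->avail = 336) until
 * the next write or synchronization.
 */
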
/**
 * ubifs_write_node - write node to the media.
 * @c: UBIFS file-system description object
 * @buf: the node to write
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 * @dtype: node life-time hint (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN)
 *
 * This function automatically fills node magic number, assigns sequence
 * number, and calculates node CRC checksum. The length of the @buf buffer has
 * to be aligned to the minimal I/O unit size. This function automatically
 * appends padding node and padding bytes if needed. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
		     int offs, int dtype)
{
	int err, buf_len = ALIGN(len, c->min_io_size);

	dbg_io("LEB %d:%d, %s, length %d (aligned %d)",
	       lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len,
	       buf_len);
	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (c->ro_error)
		return -EROFS;

	ubifs_prepare_node(c, buf, len, 1);
	err = ubi_leb_write(c->ubi, lnum, buf, offs, buf_len, dtype);
	if (err) {
		ubifs_err("cannot write %d bytes to LEB %d:%d, error %d",
			  buf_len, lnum, offs, err);
		dbg_dump_node(c, buf);
		dbg_dump_stack();
	}

	return err;
}

/**
 * ubifs_read_node_wbuf - read node from the media or write-buffer.
 * @wbuf: wbuf to check for un-written data
 * @buf: buffer to read to
 * @type: node type
 * @len: node length
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. If the node partially or fully sits in the write-buffer, this
 * function takes data from the buffer, otherwise it reads the flash media.
 * Returns zero in case of success, %-EUCLEAN if CRC mismatched and a negative
 * error code in case of failure.
 */
int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
			 int lnum, int offs)
{
	const struct ubifs_info *c = wbuf->c;
	int err, rlen, overlap;
	struct ubifs_ch *ch = buf;

	dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
	       dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
	ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);
	ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);

	spin_lock(&wbuf->lock);
	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
	if (!overlap) {
		/* We may safely unlock the write-buffer and read the data */
		spin_unlock(&wbuf->lock);
		return ubifs_read_node(c, buf, type, len, lnum, offs);
	}

	/* Don't read under wbuf */
	rlen = wbuf->offs - offs;
	if (rlen < 0)
		rlen = 0;

	/* Copy the rest from the write-buffer */
	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
	spin_unlock(&wbuf->lock);

	if (rlen > 0) {
		/* Read everything that goes before write-buffer */
		err = ubi_read(c->ubi, lnum, buf, offs, rlen);
		if (err && err != -EBADMSG) {
			ubifs_err("failed to read node %d from LEB %d:%d, "
				  "error %d", type, lnum, offs, err);
			dbg_dump_stack();
			return err;
		}
	}

	if (type != ch->node_type) {
		ubifs_err("bad node type (%d but expected %d)",
			  ch->node_type, type);
		goto out;
	}

	err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
	if (err) {
		ubifs_err("expected node type %d", type);
		return err;
	}

	rlen = le32_to_cpu(ch->len);
	if (rlen != len) {
		ubifs_err("bad node length %d, expected %d", rlen, len);
		goto out;
	}

	return 0;

out:
	ubifs_err("bad node at LEB %d:%d", lnum, offs);
	dbg_dump_node(c, buf);
	dbg_dump_stack();
	return -EINVAL;
}

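/*
 * Illustrative example for ubifs_read_node_wbuf() (the numbers are assumed):
 * if the write-buffer is targeted at LEB 7 with wbuf->offs = 2048 and the
 * caller asks for a 64-byte node at LEB 7:2040, the request overlaps the
 * buffer. The first rlen = 2048 - 2040 = 8 bytes come from the media via
 * ubi_read(), the remaining 56 bytes are copied out of wbuf->buf, and the
 * assembled node is then validated with ubifs_check_node() as usual.
 */
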
/**
 * ubifs_read_node - read node.
 * @c: UBIFS file-system description object
 * @buf: buffer to read to
 * @type: node type
 * @len: node length (not aligned)
 * @lnum: logical eraseblock number
 * @offs: offset within the logical eraseblock
 *
 * This function reads a node of known type and length, checks it and stores
 * it in @buf. Returns zero in case of success, %-EUCLEAN if CRC mismatched
 * and a negative error code in case of failure.
 */
int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
		    int lnum, int offs)
{
	int err, l;
	struct ubifs_ch *ch = buf;

	dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
	ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
	ubifs_assert(!(offs & 7) && offs < c->leb_size);
	ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);

	err = ubi_read(c->ubi, lnum, buf, offs, len);
	if (err && err != -EBADMSG) {
		ubifs_err("cannot read node %d from LEB %d:%d, error %d",
			  type, lnum, offs, err);
		return err;
	}

	if (type != ch->node_type) {
		ubifs_err("bad node type (%d but expected %d)",
			  ch->node_type, type);
		goto out;
	}

	err = ubifs_check_node(c, buf, lnum, offs, 0, 0);
	if (err) {
		ubifs_err("expected node type %d", type);
		return err;
	}

	l = le32_to_cpu(ch->len);
	if (l != len) {
		ubifs_err("bad node length %d, expected %d", l, len);
		goto out;
	}

	return 0;

out:
	ubifs_err("bad node at LEB %d:%d, LEB mapping status %d", lnum, offs,
		  ubi_is_mapped(c->ubi, lnum));
	dbg_dump_node(c, buf);
	dbg_dump_stack();
	return -EINVAL;
}

/**
 * ubifs_wbuf_init - initialize write-buffer.
 * @c: UBIFS file-system description object
 * @wbuf: write-buffer to initialize
 *
 * This function initializes write-buffer. Returns zero in case of success
 * and %-ENOMEM in case of failure.
 */
int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
{
	size_t size;

	wbuf->buf = kmalloc(c->min_io_size, GFP_KERNEL);
	if (!wbuf->buf)
		return -ENOMEM;

	size = (c->min_io_size / UBIFS_CH_SZ + 1) * sizeof(ino_t);
	wbuf->inodes = kmalloc(size, GFP_KERNEL);
	if (!wbuf->inodes) {
		kfree(wbuf->buf);
		wbuf->buf = NULL;
		return -ENOMEM;
	}

	wbuf->used = 0;
	wbuf->lnum = wbuf->offs = -1;
	wbuf->avail = c->min_io_size;
	wbuf->dtype = UBI_UNKNOWN;
	wbuf->sync_callback = NULL;
	mutex_init(&wbuf->io_mutex);
	spin_lock_init(&wbuf->lock);
	wbuf->c = c;
	wbuf->next_ino = 0;

	hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	wbuf->timer.function = wbuf_timer_callback_nolock;
	wbuf->softlimit = ktime_set(WBUF_TIMEOUT_SOFTLIMIT, 0);
	wbuf->delta = WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT;
	wbuf->delta *= 1000000000ULL;
	ubifs_assert(wbuf->delta <= ULONG_MAX);
	return 0;
}

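/*
 * Descriptive note on the sizing above: the inode array is given
 * c->min_io_size / UBIFS_CH_SZ + 1 entries because every node in the buffer
 * is at least a common header long, so this appears to be the largest number
 * of nodes - and hence distinct inode numbers - that can sit in the
 * write-buffer at once (see ubifs_wbuf_add_ino_nolock() below).
 */
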
/**
 * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array.
 * @wbuf: the write-buffer where to add
 * @inum: the inode number
 *
 * This function adds an inode number to the inode array of the write-buffer.
 */
void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum)
{
	if (!wbuf->buf)
		/* NOR flash or something similar */
		return;

	spin_lock(&wbuf->lock);
	if (wbuf->used)
		wbuf->inodes[wbuf->next_ino++] = inum;
	spin_unlock(&wbuf->lock);
}

/**
 * wbuf_has_ino - returns if the wbuf contains data from the inode.
 * @wbuf: the write-buffer
 * @inum: the inode number
 *
 * This function returns %1 if the write-buffer contains some data from the
 * given inode, otherwise it returns %0.
 */
static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum)
{
	int i, ret = 0;

	spin_lock(&wbuf->lock);
	for (i = 0; i < wbuf->next_ino; i++)
		if (inum == wbuf->inodes[i]) {
			ret = 1;
			break;
		}
	spin_unlock(&wbuf->lock);

	return ret;
}

/**
 * ubifs_sync_wbufs_by_inode - synchronize write-buffers for an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to synchronize
 *
 * This function synchronizes write-buffers which contain nodes belonging to
 * @inode. Returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode)
{
	int i, err = 0;

	for (i = 0; i < c->jhead_cnt; i++) {
		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;

		if (i == GCHD)
			/*
			 * GC head is special, do not look at it. Even if the
			 * head contains something related to this inode, it is
			 * a _copy_ of corresponding on-flash node which sits
			 * somewhere else.
			 */
			continue;

		if (!wbuf_has_ino(wbuf, inode->i_ino))
			continue;

		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
		if (wbuf_has_ino(wbuf, inode->i_ino))
			err = ubifs_wbuf_sync_nolock(wbuf);
		mutex_unlock(&wbuf->io_mutex);

		if (err) {
			ubifs_ro_mode(c, err);
			return err;
		}
	}
	return 0;
}