/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём)
 *          Adrian Hunter
 */

/*
 * This file implements VFS file and inode operations for regular files, device
 * nodes and symlinks, as well as address space operations.
 *
 * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if
 * the page is dirty and is used for optimization purposes - dirty pages are
 * not budgeted so the flag shows that 'ubifs_write_end()' should not release
 * the budget for this page. The @PG_checked flag is set if full budgeting is
 * required for the page, e.g., when it corresponds to a file hole or it is
 * beyond the file size. The budgeting is done in 'ubifs_write_begin()', because
 * it is OK to fail in this function, and the budget is released in
 * 'ubifs_write_end()'. So the @PG_private and @PG_checked flags carry
 * information about how the page was budgeted, to make it possible to release
 * the budget properly.
 *
 * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
 * implement. However, this is not true for 'ubifs_writepage()', which may be
 * called with @i_mutex unlocked. For example, when the flusher thread is doing
 * background write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex.
 * At "normal" work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g.
 * in the "sys_write -> alloc_pages -> direct reclaim path". So, in
 * 'ubifs_writepage()' we are only guaranteed that the page is locked.
 *
 * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
 * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
 * ondemand_readahead -> readpage"). In case of readahead, @I_SYNC flag is not
 * set as well. However, UBIFS disables readahead.
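 *
 * To summarize the flag combinations (roughly - see the budget helpers below
 * for the exact accounting):
 *
 *   @PG_private  @PG_checked  Meaning
 *   -----------  -----------  ---------------------------------------------
 *   set          -            page already dirty and budgeted earlier
 *   clear        set          new page (hole or beyond i_size), full budget
 *   clear        clear        page exists on flash, cheaper "change" budget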
 */

#include "ubifs.h"
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/migrate.h>

static int read_block(struct inode *inode, void *addr, unsigned int block,
		      struct ubifs_data_node *dn)
{
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err, len, out_len;
	union ubifs_key key;
	unsigned int dlen;

	data_key_init(c, &key, inode->i_ino, block);
	err = ubifs_tnc_lookup(c, &key, dn);
	if (err) {
		if (err == -ENOENT)
			/* Not found, so it must be a hole */
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		return err;
	}

	ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
		     ubifs_inode(inode)->creat_sqnum);
	len = le32_to_cpu(dn->size);
	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
		goto dump;

	dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;

	if (ubifs_crypt_is_encrypted(inode)) {
		err = ubifs_decrypt(inode, dn, &dlen, block);
		if (err)
			goto dump;
	}

	out_len = UBIFS_BLOCK_SIZE;
	err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
			       le16_to_cpu(dn->compr_type));
	if (err || len != out_len)
		goto dump;

	/*
	 * Data length can be less than a full block, even for blocks that are
	 * not the last in the file (e.g., as a result of making a hole and
	 * appending data). Ensure that the remainder is zeroed out.
	 */
	if (len < UBIFS_BLOCK_SIZE)
		memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

	return 0;

dump:
	ubifs_err(c, "bad data node (block %u, inode %lu)",
		  block, inode->i_ino);
	ubifs_dump_node(c, dn);
	return -EINVAL;
}

static int do_readpage(struct page *page)
{
	void *addr;
	int err = 0, i;
	unsigned int block, beyond;
	struct ubifs_data_node *dn;
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t i_size = i_size_read(inode);

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);
	ubifs_assert(c, !PageChecked(page));
	ubifs_assert(c, !PagePrivate(page));

	addr = kmap(page);

	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	if (block >= beyond) {
		/* Reading beyond inode */
		SetPageChecked(page);
		memset(addr, 0, PAGE_SIZE);
		goto out;
	}

	dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
	if (!dn) {
		err = -ENOMEM;
		goto error;
	}

	i = 0;
	while (1) {
		int ret;

		if (block >= beyond) {
			/* Reading beyond inode */
			err = -ENOENT;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else {
			ret = read_block(inode, addr, block, dn);
			if (ret) {
				err = ret;
				if (err != -ENOENT)
					break;
			} else if (block + 1 == beyond) {
				int dlen = le32_to_cpu(dn->size);
				int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

				if (ilen && ilen < dlen)
					memset(addr + ilen, 0, dlen - ilen);
			}
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += UBIFS_BLOCK_SIZE;
	}
	if (err) {
		struct ubifs_info *c = inode->i_sb->s_fs_info;

		if (err == -ENOENT) {
			/* Not found, so it must be a hole */
			SetPageChecked(page);
			dbg_gen("hole");
			goto out_free;
		}
		ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		goto error;
	}

out_free:
	kfree(dn);
out:
	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return 0;

error:
	kfree(dn);
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	return err;
}

/**
 * release_new_page_budget - release budget of a new page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of one new page of data.
 */
static void release_new_page_budget(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .recalculate = 1, .new_page = 1 };

	ubifs_release_budget(c, &req);
}

/**
 * release_existing_page_budget - release budget of an existing page.
 * @c: UBIFS file-system description object
 *
 * This is a helper function which releases budget corresponding to the budget
 * of changing one page of data which already exists on the flash media.
 */
static void release_existing_page_budget(struct ubifs_info *c)
{
	struct ubifs_budget_req req = { .dd_growth = c->bi.page_budget};

	ubifs_release_budget(c, &req);
}

static int write_begin_slow(struct address_space *mapping,
			    loff_t pos, unsigned len, struct page **pagep,
			    unsigned flags)
{
	struct inode *inode = mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct ubifs_budget_req req = { .new_page = 1 };
	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
	struct page *page;

	dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
		inode->i_ino, pos, len, inode->i_size);

	/*
	 * At the slow path we have to budget before locking the page, because
	 * budgeting may force write-back, which would wait on locked pages and
	 * deadlock if we had the page locked. At this point we do not know
	 * anything about the page, so assume that this is a new page which is
	 * written to a hole. This corresponds to the largest budget. Later the
	 * budget will be amended if this is not true.
	 */
	if (appending)
		/* We are appending data, budget for inode change */
		req.dirtied_ino = 1;

	err = ubifs_budget_space(c, &req);
	if (unlikely(err))
		return err;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (unlikely(!page)) {
		ubifs_release_budget(c, &req);
		return -ENOMEM;
	}

	if (!PageUptodate(page)) {
		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE)
			SetPageChecked(page);
		else {
			err = do_readpage(page);
			if (err) {
				unlock_page(page);
				put_page(page);
				ubifs_release_budget(c, &req);
				return err;
			}
		}

		SetPageUptodate(page);
		ClearPageError(page);
	}

	if (PagePrivate(page))
		/*
		 * The page is dirty, which means it was budgeted twice:
		 *   o first time the budget was allocated by the task which
		 *     made the page dirty and set the PG_private flag;
		 *   o and then we budgeted for it for the second time at the
		 *     very beginning of this function.
		 *
		 * So what we have to do is to release the page budget we
		 * allocated.
		 */
		release_new_page_budget(c);
	else if (!PageChecked(page))
		/*
		 * We are changing a page which already exists on the media.
		 * This means that changing the page does not make the amount
		 * of indexing information larger, and this part of the budget
		 * which we have already acquired may be released.
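		 * In other words, 'ubifs_convert_page_budget()' below turns
		 * the pessimistic new-page budget acquired above into the
		 * cheaper changed-page budget, releasing the difference.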
		 */
		ubifs_convert_page_budget(c);

	if (appending) {
		struct ubifs_inode *ui = ubifs_inode(inode);

		/*
		 * 'ubifs_write_end()' is optimized from the fast-path part of
		 * 'ubifs_write_begin()' and expects the @ui_mutex to be locked
		 * if data is appended.
		 */
		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The inode is dirty already, so we may free the
			 * budget we allocated.
			 */
			ubifs_release_dirty_inode_budget(c, ui);
	}

	*pagep = page;
	return 0;
}

/**
 * allocate_budget - allocate budget for 'ubifs_write_begin()'.
 * @c: UBIFS file-system description object
 * @page: page to allocate budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for 'ubifs_write_begin()' which allocates budget
 * for the operation. The budget is allocated differently depending on whether
 * this is appending, whether the page is dirty or not, and so on. This
 * function leaves the @ui->ui_mutex locked in case of appending. Returns zero
 * in case of success and %-ENOSPC in case of failure.
 */
static int allocate_budget(struct ubifs_info *c, struct page *page,
			   struct ubifs_inode *ui, int appending)
{
	struct ubifs_budget_req req = { .fast = 1 };

	if (PagePrivate(page)) {
		if (!appending)
			/*
			 * The page is dirty and we are not appending, which
			 * means no budget is needed at all.
			 */
			return 0;

		mutex_lock(&ui->ui_mutex);
		if (ui->dirty)
			/*
			 * The page is dirty and we are appending, so the inode
			 * has to be marked as dirty. However, it is already
			 * dirty, so we do not need any budget. We may return,
			 * but @ui->ui_mutex has to be left locked because we
			 * should prevent write-back from flushing the inode
			 * and freeing the budget. The lock will be released in
			 * 'ubifs_write_end()'.
			 */
			return 0;

		/*
		 * The page is dirty, we are appending, the inode is clean, so
		 * we need to budget the inode change.
		 */
		req.dirtied_ino = 1;
	} else {
		if (PageChecked(page))
			/*
			 * The page corresponds to a hole and does not exist on
			 * the media. So changing it makes the amount of
			 * indexing information larger, and we have to budget
			 * for a new page.
			 */
			req.new_page = 1;
		else
			/*
			 * Not a hole, the change will not add any new indexing
			 * information, budget for page change.
			 */
			req.dirtied_page = 1;

		if (appending) {
			mutex_lock(&ui->ui_mutex);
			if (!ui->dirty)
				/*
				 * The inode is clean but we will have to mark
				 * it as dirty because we are appending. This
				 * needs a budget.
				 */
				req.dirtied_ino = 1;
		}
	}

	return ubifs_budget_space(c, &req);
}

/*
 * This function is called when a page of data is going to be written. Since
 * the page of data will not necessarily go to the flash straight away, UBIFS
 * has to reserve space on the media for it, which is done by means of
 * budgeting.
 *
 * This is the hot-path of the file-system and we are trying to optimize it as
 * much as possible. For this reason it is split into 2 parts - slow and fast.
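 *
 * (A rough sketch of the flow: the fast path budgets with the @fast flag set,
 * so 'ubifs_budget_space()' fails with %-ENOSPC instead of forcing write-back;
 * on failure we unlock the page and fall back to 'write_begin_slow()', which
 * budgets pessimistically first and only then locks the page.)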
409 * 410 * There many budgeting cases: 411 * o a new page is appended - we have to budget for a new page and for 412 * changing the inode; however, if the inode is already dirty, there is 413 * no need to budget for it; 414 * o an existing clean page is changed - we have budget for it; if the page 415 * does not exist on the media (a hole), we have to budget for a new 416 * page; otherwise, we may budget for changing an existing page; the 417 * difference between these cases is that changing an existing page does 418 * not introduce anything new to the FS indexing information, so it does 419 * not grow, and smaller budget is acquired in this case; 420 * o an existing dirty page is changed - no need to budget at all, because 421 * the page budget has been acquired by earlier, when the page has been 422 * marked dirty. 423 * 424 * UBIFS budgeting sub-system may force write-back if it thinks there is no 425 * space to reserve. This imposes some locking restrictions and makes it 426 * impossible to take into account the above cases, and makes it impossible to 427 * optimize budgeting. 428 * 429 * The solution for this is that the fast path of 'ubifs_write_begin()' assumes 430 * there is a plenty of flash space and the budget will be acquired quickly, 431 * without forcing write-back. The slow path does not make this assumption. 432 */ 433 static int ubifs_write_begin(struct file *file, struct address_space *mapping, 434 loff_t pos, unsigned len, unsigned flags, 435 struct page **pagep, void **fsdata) 436 { 437 struct inode *inode = mapping->host; 438 struct ubifs_info *c = inode->i_sb->s_fs_info; 439 struct ubifs_inode *ui = ubifs_inode(inode); 440 pgoff_t index = pos >> PAGE_SHIFT; 441 int uninitialized_var(err), appending = !!(pos + len > inode->i_size); 442 int skipped_read = 0; 443 struct page *page; 444 445 ubifs_assert(c, ubifs_inode(inode)->ui_size == inode->i_size); 446 ubifs_assert(c, !c->ro_media && !c->ro_mount); 447 448 if (unlikely(c->ro_error)) 449 return -EROFS; 450 451 /* Try out the fast-path part first */ 452 page = grab_cache_page_write_begin(mapping, index, flags); 453 if (unlikely(!page)) 454 return -ENOMEM; 455 456 if (!PageUptodate(page)) { 457 /* The page is not loaded from the flash */ 458 if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) { 459 /* 460 * We change whole page so no need to load it. But we 461 * do not know whether this page exists on the media or 462 * not, so we assume the latter because it requires 463 * larger budget. The assumption is that it is better 464 * to budget a bit more than to read the page from the 465 * media. Thus, we are setting the @PG_checked flag 466 * here. 467 */ 468 SetPageChecked(page); 469 skipped_read = 1; 470 } else { 471 err = do_readpage(page); 472 if (err) { 473 unlock_page(page); 474 put_page(page); 475 return err; 476 } 477 } 478 479 SetPageUptodate(page); 480 ClearPageError(page); 481 } 482 483 err = allocate_budget(c, page, ui, appending); 484 if (unlikely(err)) { 485 ubifs_assert(c, err == -ENOSPC); 486 /* 487 * If we skipped reading the page because we were going to 488 * write all of it, then it is not up to date. 489 */ 490 if (skipped_read) { 491 ClearPageChecked(page); 492 ClearPageUptodate(page); 493 } 494 /* 495 * Budgeting failed which means it would have to force 496 * write-back but didn't, because we set the @fast flag in the 497 * request. Write-back cannot be done now, while we have the 498 * page locked, because it would deadlock. Unlock and free 499 * everything and fall-back to slow-path. 
		 */
		if (appending) {
			ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
			mutex_unlock(&ui->ui_mutex);
		}
		unlock_page(page);
		put_page(page);

		return write_begin_slow(mapping, pos, len, pagep, flags);
	}

	/*
	 * Whee, we acquired budgeting quickly - without involving
	 * garbage-collection, committing or forcing write-back. We return
	 * with @ui->ui_mutex locked if we are appending pages, and unlocked
	 * otherwise. This is an optimization (slightly hacky though).
	 */
	*pagep = page;
	return 0;
}

/**
 * cancel_budget - cancel budget.
 * @c: UBIFS file-system description object
 * @page: page to cancel budget for
 * @ui: UBIFS inode object the page belongs to
 * @appending: non-zero if the page is appended
 *
 * This is a helper function for a page write operation. It unlocks the
 * @ui->ui_mutex in case of appending.
 */
static void cancel_budget(struct ubifs_info *c, struct page *page,
			  struct ubifs_inode *ui, int appending)
{
	if (appending) {
		if (!ui->dirty)
			ubifs_release_dirty_inode_budget(c, ui);
		mutex_unlock(&ui->ui_mutex);
	}
	if (!PagePrivate(page)) {
		if (PageChecked(page))
			release_new_page_budget(c);
		else
			release_existing_page_budget(c);
	}
}

static int ubifs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	loff_t end_pos = pos + len;
	int appending = !!(end_pos > inode->i_size);

	dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
		inode->i_ino, pos, page->index, len, copied, inode->i_size);

	if (unlikely(copied < len && len == PAGE_SIZE)) {
		/*
		 * VFS copied less data to the page than it intended and
		 * declared in its '->write_begin()' call via the @len
		 * argument. If the page was not up-to-date, and @len was
		 * @PAGE_SIZE, the 'ubifs_write_begin()' function did
		 * not load it from the media (for optimization reasons). This
		 * means that part of the page contains garbage. So read the
		 * page now.
		 */
		dbg_gen("copied %d instead of %d, read page and repeat",
			copied, len);
		cancel_budget(c, page, ui, appending);
		ClearPageChecked(page);

		/*
		 * Return 0 to force VFS to repeat the whole operation, or the
		 * error code if 'do_readpage()' fails.
		 */
		copied = do_readpage(page);
		goto out;
	}

	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		atomic_long_inc(&c->dirty_pg_cnt);
		__set_page_dirty_nobuffers(page);
	}

	if (appending) {
		i_size_write(inode, end_pos);
		ui->ui_size = end_pos;
		/*
		 * Note, we do not set @I_DIRTY_PAGES (which means that the
		 * inode has dirty pages), this has been done in
		 * '__set_page_dirty_nobuffers()'.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
		ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
		mutex_unlock(&ui->ui_mutex);
	}

out:
	unlock_page(page);
	put_page(page);
	return copied;
}

/**
 * populate_page - copy data nodes into a page for bulk-read.
 * @c: UBIFS file-system description object
 * @page: page
 * @bu: bulk-read information
 * @n: next zbranch slot
 *
 * This function returns %0 on success and a negative error code on failure.
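 *
 * (In short: it walks the page block by block, copying decompressed data from
 * the nodes cached in @bu->buf and zero-filling holes; @n is advanced past the
 * zbranch slots consumed, so consecutive calls populate consecutive pages.)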
 */
static int populate_page(struct ubifs_info *c, struct page *page,
			 struct bu_info *bu, int *n)
{
	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
	struct inode *inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	unsigned int page_block;
	void *addr, *zaddr;
	pgoff_t end_index;

	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
		inode->i_ino, page->index, i_size, page->flags);

	addr = zaddr = kmap(page);

	end_index = (i_size - 1) >> PAGE_SHIFT;
	if (!i_size || page->index > end_index) {
		hole = 1;
		memset(addr, 0, PAGE_SIZE);
		goto out_hole;
	}

	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	while (1) {
		int err, len, out_len, dlen;

		if (nn >= bu->cnt) {
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
			struct ubifs_data_node *dn;

			dn = bu->buf + (bu->zbranch[nn].offs - offs);

			ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >
				     ubifs_inode(inode)->creat_sqnum);

			len = le32_to_cpu(dn->size);
			if (len <= 0 || len > UBIFS_BLOCK_SIZE)
				goto out_err;

			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
			out_len = UBIFS_BLOCK_SIZE;

			if (ubifs_crypt_is_encrypted(inode)) {
				err = ubifs_decrypt(inode, dn, &dlen,
						    page_block);
				if (err)
					goto out_err;
			}

			err = ubifs_decompress(c, &dn->data, dlen, addr,
					       &out_len,
					       le16_to_cpu(dn->compr_type));
			if (err || len != out_len)
				goto out_err;

			if (len < UBIFS_BLOCK_SIZE)
				memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

			nn += 1;
			read = (i << UBIFS_BLOCK_SHIFT) + len;
		} else if (key_block(c, &bu->zbranch[nn].key) < page_block) {
			nn += 1;
			continue;
		} else {
			hole = 1;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		addr += UBIFS_BLOCK_SIZE;
		page_block += 1;
	}

	if (end_index == page->index) {
		int len = i_size & (PAGE_SIZE - 1);

		if (len && len < read)
			memset(zaddr + len, 0, read - len);
	}

out_hole:
	if (hole) {
		SetPageChecked(page);
		dbg_gen("hole");
	}

	SetPageUptodate(page);
	ClearPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	*n = nn;
	return 0;

out_err:
	ClearPageUptodate(page);
	SetPageError(page);
	flush_dcache_page(page);
	kunmap(page);
	ubifs_err(c, "bad data node (block %u, inode %lu)",
		  page_block, inode->i_ino);
	return -EINVAL;
}

/**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read information
 * @page1: first page to read
 *
 * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
			      struct page *page1)
{
	pgoff_t offset = page1->index, end_index;
	struct address_space *mapping = page1->mapping;
	struct inode *inode = mapping->host;
	struct ubifs_inode *ui = ubifs_inode(inode);
	int err, page_idx, page_cnt, ret = 0, n = 0;
	int allocate = bu->buf ? 0 : 1;
	loff_t isize;
	gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS;
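
	/*
	 * Note: the read-ahead pages below are allocated with __GFP_FS
	 * cleared, so that the allocation cannot recurse back into the
	 * file-system while we hold @ui_mutex (and possibly @bu_mutex).
	 */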
	err = ubifs_tnc_get_bu_keys(c, bu);
	if (err)
		goto out_warn;

	if (bu->eof) {
		/* Turn off bulk-read at the end of the file */
		ui->read_in_a_row = 1;
		ui->bulk_read = 0;
	}

	page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
	if (!page_cnt) {
		/*
		 * This happens when there are multiple blocks per page and the
		 * blocks for the first page we are looking for are not
		 * together. If all the pages were like this, bulk-read would
		 * reduce performance, so we turn it off for a while.
		 */
		goto out_bu_off;
	}

	if (bu->cnt) {
		if (allocate) {
			/*
			 * Allocate bulk-read buffer depending on how many data
			 * nodes we are going to read.
			 */
			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
				      bu->zbranch[bu->cnt - 1].len -
				      bu->zbranch[0].offs;
			ubifs_assert(c, bu->buf_len > 0);
			ubifs_assert(c, bu->buf_len <= c->leb_size);
			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
			if (!bu->buf)
				goto out_bu_off;
		}

		err = ubifs_tnc_bulk_read(c, bu);
		if (err)
			goto out_warn;
	}

	err = populate_page(c, page1, bu, &n);
	if (err)
		goto out_warn;

	unlock_page(page1);
	ret = 1;

	isize = i_size_read(inode);
	if (isize == 0)
		goto out_free;
	end_index = ((isize - 1) >> PAGE_SHIFT);

	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
		pgoff_t page_offset = offset + page_idx;
		struct page *page;

		if (page_offset > end_index)
			break;
		page = find_or_create_page(mapping, page_offset, ra_gfp_mask);
		if (!page)
			break;
		if (!PageUptodate(page))
			err = populate_page(c, page, bu, &n);
		unlock_page(page);
		put_page(page);
		if (err)
			break;
	}

	ui->last_page_read = offset + page_idx - 1;

out_free:
	if (allocate)
		kfree(bu->buf);
	return ret;

out_warn:
	ubifs_warn(c, "ignoring error %d and skipping bulk-read", err);
	goto out_free;

out_bu_off:
	ui->read_in_a_row = ui->bulk_read = 0;
	goto out_free;
}

/**
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @page: page from which to start bulk-read.
 *
 * Some flash media are capable of reading sequentially at faster rates. UBIFS
 * bulk-read facility is designed to take advantage of that, by reading in one
 * go consecutive data nodes that are also located consecutively in the same
 * LEB. This function returns %1 if a bulk-read is done and %0 otherwise.
 */
static int ubifs_bulk_read(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	pgoff_t index = page->index, last_page_read = ui->last_page_read;
	struct bu_info *bu;
	int err = 0, allocated = 0;

	ui->last_page_read = index;
	if (!c->bulk_read)
		return 0;

	/*
	 * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
	 * so don't bother if we cannot lock the mutex.
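	 * (Losing the trylock race is harmless: the caller simply falls back
	 * to an ordinary single-page 'do_readpage()'.)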
	 */
	if (!mutex_trylock(&ui->ui_mutex))
		return 0;

	if (index != last_page_read + 1) {
		/* Turn off bulk-read if we stop reading sequentially */
		ui->read_in_a_row = 1;
		if (ui->bulk_read)
			ui->bulk_read = 0;
		goto out_unlock;
	}

	if (!ui->bulk_read) {
		ui->read_in_a_row += 1;
		if (ui->read_in_a_row < 3)
			goto out_unlock;
		/* Three reads in a row, so switch on bulk-read */
		ui->bulk_read = 1;
	}

	/*
	 * If possible, try to use pre-allocated bulk-read information, which
	 * is protected by @c->bu_mutex.
	 */
	if (mutex_trylock(&c->bu_mutex))
		bu = &c->bu;
	else {
		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
		if (!bu)
			goto out_unlock;

		bu->buf = NULL;
		allocated = 1;
	}

	bu->buf_len = c->max_bu_buf_len;
	data_key_init(c, &bu->key, inode->i_ino,
		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
	err = ubifs_do_bulk_read(c, bu, page);

	if (!allocated)
		mutex_unlock(&c->bu_mutex);
	else
		kfree(bu);

out_unlock:
	mutex_unlock(&ui->ui_mutex);
	return err;
}

static int ubifs_readpage(struct file *file, struct page *page)
{
	if (ubifs_bulk_read(page))
		return 0;
	do_readpage(page);
	unlock_page(page);
	return 0;
}

static int do_writepage(struct page *page, int len)
{
	int err = 0, i, blen;
	unsigned int block;
	void *addr;
	union ubifs_key key;
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

#ifdef UBIFS_DEBUG
	struct ubifs_inode *ui = ubifs_inode(inode);

	spin_lock(&ui->ui_lock);
	ubifs_assert(c, page->index <= ui->synced_i_size >> PAGE_SHIFT);
	spin_unlock(&ui->ui_lock);
#endif

	/* Update radix tree tags */
	set_page_writeback(page);

	addr = kmap(page);
	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	i = 0;
	while (len) {
		blen = min_t(int, len, UBIFS_BLOCK_SIZE);
		data_key_init(c, &key, inode->i_ino, block);
		err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
		if (err)
			break;
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += blen;
		len -= blen;
	}
	if (err) {
		SetPageError(page);
		ubifs_err(c, "cannot write page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		ubifs_ro_mode(c, err);
	}

	ubifs_assert(c, PagePrivate(page));
	if (PageChecked(page))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	ClearPagePrivate(page);
	ClearPageChecked(page);

	kunmap(page);
	unlock_page(page);
	end_page_writeback(page);
	return err;
}

/*
 * When writing-back dirty inodes, VFS first writes-back pages belonging to the
 * inode, then the inode itself. For UBIFS this may cause a problem. Consider a
 * situation when we have an inode with size 0, then a megabyte of data is
 * appended to the inode, then write-back starts and flushes some amount of the
 * dirty pages, the journal becomes full, commit happens and finishes, and then
 * an unclean reboot happens. When the file system is mounted next time, the
 * inode size would still be 0, but there would be many pages which are beyond
 * the inode size, they would be indexed and consume flash space.
 * Because the journal has been committed, the replay would not be able to
 * detect this situation and correct the inode size. This means UBIFS would
 * have to scan the whole index and correct all inode sizes, which is long and
 * unacceptable.
 *
 * To prevent situations like this, UBIFS writes pages back only if they are
 * within the last synchronized inode size, i.e. the size which has been
 * written to the flash media last time. Otherwise, UBIFS forces inode
 * write-back, thus making sure the on-flash inode contains the current inode
 * size, and then keeps writing pages back.
 *
 * Some locking issues explanation. 'ubifs_writepage()' is first called with
 * the page locked, and it locks @ui_mutex. However, write-back does not take
 * inode @i_mutex, which means other VFS operations may be run on this inode at
 * the same time. And the problematic one is truncation to smaller size, from
 * where we have to call 'truncate_setsize()', which first changes
 * @inode->i_size, then drops the truncated pages. And while dropping the
 * pages, it takes the page lock. This means that 'do_truncation()' cannot call
 * 'truncate_setsize()' with @ui_mutex locked, because it would deadlock with
 * 'ubifs_writepage()'. This means that @inode->i_size is changed while
 * @ui_mutex is unlocked.
 *
 * XXX(truncate): with the new truncate sequence this is not true anymore, and
 * the calls to truncate_setsize can be moved around freely. They should be
 * moved to the very end of the truncate sequence.
 *
 * But in 'ubifs_writepage()' we have to guarantee that we do not write beyond
 * inode size. How do we do this if @inode->i_size may become smaller while we
 * are in the middle of 'ubifs_writepage()'? The UBIFS solution is the
 * @ui->ui_size "shadow" field which UBIFS uses instead of @inode->i_size
 * internally and updates it under @ui_mutex.
 *
 * Q: why do we not worry that if we race with truncation, we may end up with a
 * situation when the inode is truncated while we are in the middle of
 * 'do_writepage()', and so we write beyond inode size?
 * A: If we are in the middle of 'do_writepage()', truncation would be locked
 * on the page lock and it would not write the truncated inode node to the
 * journal before we have finished.
 */
static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_inode *ui = ubifs_inode(inode);
	loff_t i_size = i_size_read(inode), synced_i_size;
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	int err, len = i_size & (PAGE_SIZE - 1);
	void *kaddr;

	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
		inode->i_ino, page->index, page->flags);
	ubifs_assert(c, PagePrivate(page));
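
	/*
	 * (Example of the arithmetic above, assuming 4096-byte pages: with
	 * i_size == 10000, end_index is 2 and len is 1808 - pages 0 and 1 are
	 * fully inside @i_size, page 2 straddles it, and any higher page index
	 * is fully outside.)
	 */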

	/* Is the page fully outside @i_size? (truncate in progress) */
	if (page->index > end_index || (page->index == end_index && !len)) {
		err = 0;
		goto out_unlock;
	}

	spin_lock(&ui->ui_lock);
	synced_i_size = ui->synced_i_size;
	spin_unlock(&ui->ui_lock);

	/* Is the page fully inside @i_size? */
	if (page->index < end_index) {
		if (page->index >= synced_i_size >> PAGE_SHIFT) {
			err = inode->i_sb->s_op->write_inode(inode, NULL);
			if (err)
				goto out_unlock;
			/*
			 * The inode has been written, but the write-buffer has
			 * not been synchronized, so in case of an unclean
			 * reboot we may end up with some pages beyond inode
			 * size, but they would be in the journal (because
			 * commit flushes write buffers) and recovery would
			 * deal with this.
			 */
		}
		return do_writepage(page, PAGE_SIZE);
	}

	/*
	 * The page straddles @i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	kaddr = kmap_atomic(page);
	memset(kaddr + len, 0, PAGE_SIZE - len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	if (i_size > synced_i_size) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out_unlock;
	}

	return do_writepage(page, len);

out_unlock:
	unlock_page(page);
	return err;
}

/**
 * do_attr_changes - change inode attributes.
 * @inode: inode to change attributes for
 * @attr: describes attributes to change
 */
static void do_attr_changes(struct inode *inode, const struct iattr *attr)
{
	if (attr->ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (attr->ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (attr->ia_valid & ATTR_ATIME)
		inode->i_atime = timespec64_trunc(attr->ia_atime,
						  inode->i_sb->s_time_gran);
	if (attr->ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec64_trunc(attr->ia_mtime,
						  inode->i_sb->s_time_gran);
	if (attr->ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec64_trunc(attr->ia_ctime,
						  inode->i_sb->s_time_gran);
	if (attr->ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		inode->i_mode = mode;
	}
}

/**
 * do_truncation - truncate an inode.
 * @c: UBIFS file-system description object
 * @inode: inode to truncate
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call when the inode is truncated
 * to a smaller size. Returns zero in case of success and a negative error code
 * in case of failure.
 */
static int do_truncation(struct ubifs_info *c, struct inode *inode,
			 const struct iattr *attr)
{
	int err;
	struct ubifs_budget_req req;
	loff_t old_size = inode->i_size, new_size = attr->ia_size;
	int offset = new_size & (UBIFS_BLOCK_SIZE - 1), budgeted = 1;
	struct ubifs_inode *ui = ubifs_inode(inode);

	dbg_gen("ino %lu, size %lld -> %lld", inode->i_ino, old_size, new_size);
	memset(&req, 0, sizeof(struct ubifs_budget_req));

	/*
	 * If this is truncation to a smaller size, and we do not truncate on a
	 * block boundary, budget for changing one data block, because the last
	 * block will be re-written.
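	 * (E.g., assuming 4096-byte blocks, truncating to 5000 bytes leaves
	 * 5000 & 4095 == 904 bytes in the last block, so that block must be
	 * re-written and @req.dirtied_page is set; truncating to 8192 bytes
	 * falls on a block boundary and needs no data-block budget.)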
	 */
	if (new_size & (UBIFS_BLOCK_SIZE - 1))
		req.dirtied_page = 1;

	req.dirtied_ino = 1;
	/* A funny way to budget for truncation node */
	req.dirtied_ino_d = UBIFS_TRUN_NODE_SZ;
	err = ubifs_budget_space(c, &req);
	if (err) {
		/*
		 * Treat truncations to zero as deletion and always allow them,
		 * just like we do for '->unlink()'.
		 */
		if (new_size || err != -ENOSPC)
			return err;
		budgeted = 0;
	}

	truncate_setsize(inode, new_size);

	if (offset) {
		pgoff_t index = new_size >> PAGE_SHIFT;
		struct page *page;

		page = find_lock_page(inode->i_mapping, index);
		if (page) {
			if (PageDirty(page)) {
				/*
				 * 'ubifs_jnl_truncate()' will try to truncate
				 * the last data node, but it contains
				 * out-of-date data because the page is dirty.
				 * Write the page now, so that
				 * 'ubifs_jnl_truncate()' will see an already
				 * truncated (and up to date) data node.
				 */
				ubifs_assert(c, PagePrivate(page));

				clear_page_dirty_for_io(page);
				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
					offset = new_size &
						 (PAGE_SIZE - 1);
				err = do_writepage(page, offset);
				put_page(page);
				if (err)
					goto out_budg;
				/*
				 * We could now tell 'ubifs_jnl_truncate()' not
				 * to read the last block.
				 */
			} else {
				/*
				 * We could 'kmap()' the page and pass the data
				 * to 'ubifs_jnl_truncate()' to save it from
				 * having to read it.
				 */
				unlock_page(page);
				put_page(page);
			}
		}
	}

	mutex_lock(&ui->ui_mutex);
	ui->ui_size = inode->i_size;
	/* Truncation changes inode [mc]time */
	inode->i_mtime = inode->i_ctime = current_time(inode);
	/* Other attributes may be changed at the same time as well */
	do_attr_changes(inode, attr);
	err = ubifs_jnl_truncate(c, inode, old_size, new_size);
	mutex_unlock(&ui->ui_mutex);

out_budg:
	if (budgeted)
		ubifs_release_budget(c, &req);
	else {
		c->bi.nospace = c->bi.nospace_rp = 0;
		smp_wmb();
	}
	return err;
}

/**
 * do_setattr - change inode attributes.
 * @c: UBIFS file-system description object
 * @inode: inode to change attributes for
 * @attr: inode attribute changes description
 *
 * This function implements VFS '->setattr()' call for all cases except
 * truncations to smaller size. Returns zero in case of success and a negative
 * error code in case of failure.
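 *
 * (Note that this includes truncations to a *larger* size: growing a file
 * only dirties the inode, so no data blocks need to be budgeted for here.)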
 */
static int do_setattr(struct ubifs_info *c, struct inode *inode,
		      const struct iattr *attr)
{
	int err, release;
	loff_t new_size = attr->ia_size;
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		dbg_gen("size %lld -> %lld", inode->i_size, new_size);
		truncate_setsize(inode, new_size);
	}

	mutex_lock(&ui->ui_mutex);
	if (attr->ia_valid & ATTR_SIZE) {
		/* Truncation changes inode [mc]time */
		inode->i_mtime = inode->i_ctime = current_time(inode);
		/* 'truncate_setsize()' changed @i_size, update @ui_size */
		ui->ui_size = inode->i_size;
	}

	do_attr_changes(inode, attr);

	release = ui->dirty;
	if (attr->ia_valid & ATTR_SIZE)
		/*
		 * Inode length changed, so we have to make sure
		 * @I_DIRTY_DATASYNC is set.
		 */
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		mark_inode_dirty_sync(inode);
	mutex_unlock(&ui->ui_mutex);

	if (release)
		ubifs_release_budget(c, &req);
	if (IS_SYNC(inode))
		err = inode->i_sb->s_op->write_inode(inode, NULL);
	return err;
}

int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
{
	int err;
	struct inode *inode = d_inode(dentry);
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	dbg_gen("ino %lu, mode %#x, ia_valid %#x",
		inode->i_ino, inode->i_mode, attr->ia_valid);
	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = dbg_check_synced_i_size(c, inode);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size < inode->i_size)
		/* Truncation to a smaller size */
		err = do_truncation(c, inode, attr);
	else
		err = do_setattr(c, inode, attr);

	return err;
}

static void ubifs_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	ubifs_assert(c, PagePrivate(page));
	if (offset || length < PAGE_SIZE)
		/* Partial page remains dirty */
		return;

	if (PageChecked(page))
		release_new_page_budget(c);
	else
		release_existing_page_budget(c);

	atomic_long_dec(&c->dirty_pg_cnt);
	ClearPagePrivate(page);
	ClearPageChecked(page);
}

int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	int err;

	dbg_gen("syncing inode %lu", inode->i_ino);

	if (c->ro_mount)
		/*
		 * For some really strange reasons VFS does not filter out
		 * 'fsync()' for R/O mounted file-systems as per 2.6.39.
		 */
		return 0;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;
	inode_lock(inode);

	/*
	 * Synchronize the inode unless this is a 'datasync()' call.
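	 *
	 * (For datasync, the inode is written out below only if it carries
	 * state that matters for data integrity, i.e. @I_DIRTY_DATASYNC is
	 * set; pure timestamp dirtiness alone does not force an inode write.)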
	 */
	if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
		err = inode->i_sb->s_op->write_inode(inode, NULL);
		if (err)
			goto out;
	}

	/*
	 * Nodes related to this inode may still sit in a write-buffer. Flush
	 * them.
	 */
	err = ubifs_sync_wbufs_by_inode(c, inode);
out:
	inode_unlock(inode);
	return err;
}

/**
 * mctime_update_needed - check if mtime or ctime update is needed.
 * @inode: the inode to do the check for
 * @now: current time
 *
 * This helper function checks if the inode mtime/ctime should be updated or
 * not. If current values of the time-stamps are within the UBIFS inode time
 * granularity, they are not updated. This is an optimization.
 */
static inline int mctime_update_needed(const struct inode *inode,
				       const struct timespec64 *now)
{
	if (!timespec64_equal(&inode->i_mtime, now) ||
	    !timespec64_equal(&inode->i_ctime, now))
		return 1;
	return 0;
}

/**
 * ubifs_update_time - update time of inode.
 * @inode: inode to update
 * @time: timestamp to set
 * @flags: @S_ATIME, @S_CTIME and/or @S_MTIME, selecting which time fields to
 *	   update
 *
 * This function updates time of the inode.
 */
int ubifs_update_time(struct inode *inode, struct timespec64 *time,
		      int flags)
{
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct ubifs_budget_req req = { .dirtied_ino = 1,
			.dirtied_ino_d = ALIGN(ui->data_len, 8) };
	int iflags = I_DIRTY_TIME;
	int err, release;

	if (!IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
		return generic_update_time(inode, time, flags);

	err = ubifs_budget_space(c, &req);
	if (err)
		return err;

	mutex_lock(&ui->ui_mutex);
	if (flags & S_ATIME)
		inode->i_atime = *time;
	if (flags & S_CTIME)
		inode->i_ctime = *time;
	if (flags & S_MTIME)
		inode->i_mtime = *time;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		iflags |= I_DIRTY_SYNC;

	release = ui->dirty;
	__mark_inode_dirty(inode, iflags);
	mutex_unlock(&ui->ui_mutex);
	if (release)
		ubifs_release_budget(c, &req);
	return 0;
}

/**
 * update_mctime - update mtime and ctime of an inode.
 * @inode: inode to update
 *
 * This function updates mtime and ctime of the inode if they are not
 * equivalent to the current time. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int update_mctime(struct inode *inode)
{
	struct timespec64 now = current_time(inode);
	struct ubifs_inode *ui = ubifs_inode(inode);
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	if (mctime_update_needed(inode, &now)) {
		int err, release;
		struct ubifs_budget_req req = { .dirtied_ino = 1,
				.dirtied_ino_d = ALIGN(ui->data_len, 8) };

		err = ubifs_budget_space(c, &req);
		if (err)
			return err;

		mutex_lock(&ui->ui_mutex);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		release = ui->dirty;
		mark_inode_dirty_sync(inode);
		mutex_unlock(&ui->ui_mutex);
		if (release)
			ubifs_release_budget(c, &req);
	}

	return 0;
}

static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	int err = update_mctime(file_inode(iocb->ki_filp));

	if (err)
		return err;

	return generic_file_write_iter(iocb, from);
}

static int ubifs_set_page_dirty(struct page *page)
{
	int ret;
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	ret = __set_page_dirty_nobuffers(page);
	/*
	 * An attempt to dirty a page without budgeting for it - should not
	 * happen.
	 */
	ubifs_assert(c, ret == 0);
	return ret;
}

#ifdef CONFIG_MIGRATION
static int ubifs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc;

	rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
#endif

static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
{
	struct inode *inode = page->mapping->host;
	struct ubifs_info *c = inode->i_sb->s_fs_info;

	/*
	 * An attempt to release a dirty page without budgeting for it - should
	 * not happen.
	 */
	if (PageWriteback(page))
		return 0;
	ubifs_assert(c, PagePrivate(page));
	ubifs_assert(c, 0);
	ClearPagePrivate(page);
	ClearPageChecked(page);
	return 1;
}

/*
 * An mmap()ed file has taken a write protection fault and is being made
 * writable. UBIFS must ensure the page is budgeted for.
 */
static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct ubifs_info *c = inode->i_sb->s_fs_info;
	struct timespec64 now = current_time(inode);
	struct ubifs_budget_req req = { .new_page = 1 };
	int err, update_time;

	dbg_gen("ino %lu, pg %lu, i_size %lld",
		inode->i_ino, page->index, i_size_read(inode));
	ubifs_assert(c, !c->ro_media && !c->ro_mount);

	if (unlikely(c->ro_error))
		return VM_FAULT_SIGBUS; /* -EROFS */

	/*
	 * We have not locked @page so far so we may budget for changing the
	 * page. Note, we cannot do this after we locked the page, because
	 * budgeting may cause write-back which would cause deadlock.
1544 * 1545 * At the moment we do not know whether the page is dirty or not, so we 1546 * assume that it is not and budget for a new page. We could look at 1547 * the @PG_private flag and figure this out, but we may race with write 1548 * back and the page state may change by the time we lock it, so this 1549 * would need additional care. We do not bother with this at the 1550 * moment, although it might be good idea to do. Instead, we allocate 1551 * budget for a new page and amend it later on if the page was in fact 1552 * dirty. 1553 * 1554 * The budgeting-related logic of this function is similar to what we 1555 * do in 'ubifs_write_begin()' and 'ubifs_write_end()'. Glance there 1556 * for more comments. 1557 */ 1558 update_time = mctime_update_needed(inode, &now); 1559 if (update_time) 1560 /* 1561 * We have to change inode time stamp which requires extra 1562 * budgeting. 1563 */ 1564 req.dirtied_ino = 1; 1565 1566 err = ubifs_budget_space(c, &req); 1567 if (unlikely(err)) { 1568 if (err == -ENOSPC) 1569 ubifs_warn(c, "out of space for mmapped file (inode number %lu)", 1570 inode->i_ino); 1571 return VM_FAULT_SIGBUS; 1572 } 1573 1574 lock_page(page); 1575 if (unlikely(page->mapping != inode->i_mapping || 1576 page_offset(page) > i_size_read(inode))) { 1577 /* Page got truncated out from underneath us */ 1578 goto sigbus; 1579 } 1580 1581 if (PagePrivate(page)) 1582 release_new_page_budget(c); 1583 else { 1584 if (!PageChecked(page)) 1585 ubifs_convert_page_budget(c); 1586 SetPagePrivate(page); 1587 atomic_long_inc(&c->dirty_pg_cnt); 1588 __set_page_dirty_nobuffers(page); 1589 } 1590 1591 if (update_time) { 1592 int release; 1593 struct ubifs_inode *ui = ubifs_inode(inode); 1594 1595 mutex_lock(&ui->ui_mutex); 1596 inode->i_mtime = inode->i_ctime = current_time(inode); 1597 release = ui->dirty; 1598 mark_inode_dirty_sync(inode); 1599 mutex_unlock(&ui->ui_mutex); 1600 if (release) 1601 ubifs_release_dirty_inode_budget(c, ui); 1602 } 1603 1604 wait_for_stable_page(page); 1605 return VM_FAULT_LOCKED; 1606 1607 sigbus: 1608 unlock_page(page); 1609 ubifs_release_budget(c, &req); 1610 return VM_FAULT_SIGBUS; 1611 } 1612 1613 static const struct vm_operations_struct ubifs_file_vm_ops = { 1614 .fault = filemap_fault, 1615 .map_pages = filemap_map_pages, 1616 .page_mkwrite = ubifs_vm_page_mkwrite, 1617 }; 1618 1619 static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma) 1620 { 1621 int err; 1622 1623 err = generic_file_mmap(file, vma); 1624 if (err) 1625 return err; 1626 vma->vm_ops = &ubifs_file_vm_ops; 1627 1628 if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT)) 1629 file_accessed(file); 1630 1631 return 0; 1632 } 1633 1634 static const char *ubifs_get_link(struct dentry *dentry, 1635 struct inode *inode, 1636 struct delayed_call *done) 1637 { 1638 struct ubifs_inode *ui = ubifs_inode(inode); 1639 1640 if (!IS_ENCRYPTED(inode)) 1641 return ui->data; 1642 1643 if (!dentry) 1644 return ERR_PTR(-ECHILD); 1645 1646 return fscrypt_get_symlink(inode, ui->data, ui->data_len, done); 1647 } 1648 1649 const struct address_space_operations ubifs_file_address_operations = { 1650 .readpage = ubifs_readpage, 1651 .writepage = ubifs_writepage, 1652 .write_begin = ubifs_write_begin, 1653 .write_end = ubifs_write_end, 1654 .invalidatepage = ubifs_invalidatepage, 1655 .set_page_dirty = ubifs_set_page_dirty, 1656 #ifdef CONFIG_MIGRATION 1657 .migratepage = ubifs_migrate_page, 1658 #endif 1659 .releasepage = ubifs_releasepage, 1660 }; 1661 1662 const struct inode_operations 

const struct inode_operations ubifs_file_inode_operations = {
	.setattr     = ubifs_setattr,
	.getattr     = ubifs_getattr,
#ifdef CONFIG_UBIFS_FS_XATTR
	.listxattr   = ubifs_listxattr,
#endif
	.update_time = ubifs_update_time,
};

const struct inode_operations ubifs_symlink_inode_operations = {
	.get_link    = ubifs_get_link,
	.setattr     = ubifs_setattr,
	.getattr     = ubifs_getattr,
#ifdef CONFIG_UBIFS_FS_XATTR
	.listxattr   = ubifs_listxattr,
#endif
	.update_time = ubifs_update_time,
};

const struct file_operations ubifs_file_operations = {
	.llseek         = generic_file_llseek,
	.read_iter      = generic_file_read_iter,
	.write_iter     = ubifs_write_iter,
	.mmap           = ubifs_file_mmap,
	.fsync          = ubifs_fsync,
	.unlocked_ioctl = ubifs_ioctl,
	.splice_read    = generic_file_splice_read,
	.splice_write   = iter_file_splice_write,
	.open           = fscrypt_file_open,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = ubifs_compat_ioctl,
#endif
};