// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext2/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie
 *	(sct@dcs.ed.ac.uk), 1993, 1998
 * Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext2_get_block() by Al Viro, 2000
 */

#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/fiemap.h>
#include <linux/iomap.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include "ext2.h"
#include "acl.h"
#include "xattr.h"

static int __ext2_write_inode(struct inode *inode, int do_sync);

/*
 * Test whether an inode is a fast symlink.
 */
static inline int ext2_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT2_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) &&
		inode->i_blocks - ea_blocks == 0);
}

static void ext2_truncate_blocks(struct inode *inode, loff_t offset);

static void ext2_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		ext2_truncate_blocks(inode, inode->i_size);
	}
}

/*
 * Called at the last iput() if i_nlink is zero.
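 * (When i_nlink is still set this is an ordinary eviction: quota references
 * are dropped, the page cache is truncated and the inode is cleared, but no
 * on-disk blocks are freed.)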
72 */ 73 void ext2_evict_inode(struct inode * inode) 74 { 75 struct ext2_block_alloc_info *rsv; 76 int want_delete = 0; 77 78 if (!inode->i_nlink && !is_bad_inode(inode)) { 79 want_delete = 1; 80 dquot_initialize(inode); 81 } else { 82 dquot_drop(inode); 83 } 84 85 truncate_inode_pages_final(&inode->i_data); 86 87 if (want_delete) { 88 sb_start_intwrite(inode->i_sb); 89 /* set dtime */ 90 EXT2_I(inode)->i_dtime = ktime_get_real_seconds(); 91 mark_inode_dirty(inode); 92 __ext2_write_inode(inode, inode_needs_sync(inode)); 93 /* truncate to 0 */ 94 inode->i_size = 0; 95 if (inode->i_blocks) 96 ext2_truncate_blocks(inode, 0); 97 ext2_xattr_delete_inode(inode); 98 } 99 100 invalidate_inode_buffers(inode); 101 clear_inode(inode); 102 103 ext2_discard_reservation(inode); 104 rsv = EXT2_I(inode)->i_block_alloc_info; 105 EXT2_I(inode)->i_block_alloc_info = NULL; 106 if (unlikely(rsv)) 107 kfree(rsv); 108 109 if (want_delete) { 110 ext2_free_inode(inode); 111 sb_end_intwrite(inode->i_sb); 112 } 113 } 114 115 typedef struct { 116 __le32 *p; 117 __le32 key; 118 struct buffer_head *bh; 119 } Indirect; 120 121 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v) 122 { 123 p->key = *(p->p = v); 124 p->bh = bh; 125 } 126 127 static inline int verify_chain(Indirect *from, Indirect *to) 128 { 129 while (from <= to && from->key == *from->p) 130 from++; 131 return (from > to); 132 } 133 134 /** 135 * ext2_block_to_path - parse the block number into array of offsets 136 * @inode: inode in question (we are only interested in its superblock) 137 * @i_block: block number to be parsed 138 * @offsets: array to store the offsets in 139 * @boundary: set this non-zero if the referred-to block is likely to be 140 * followed (on disk) by an indirect block. 141 * To store the locations of file's data ext2 uses a data structure common 142 * for UNIX filesystems - tree of pointers anchored in the inode, with 143 * data blocks at leaves and indirect blocks in intermediate nodes. 144 * This function translates the block number into path in that tree - 145 * return value is the path length and @offsets[n] is the offset of 146 * pointer to (n+1)th node in the nth one. If @block is out of range 147 * (negative or too large) warning is printed and zero returned. 148 * 149 * Note: function doesn't find node addresses, so no IO is needed. All 150 * we need to know is the capacity of indirect blocks (taken from the 151 * inode->i_sb). 152 */ 153 154 /* 155 * Portability note: the last comparison (check that we fit into triple 156 * indirect block) is spelled differently, because otherwise on an 157 * architecture with 32-bit longs and 8Kb pages we might get into trouble 158 * if our filesystem had 8Kb blocks. We might use long long, but that would 159 * kill us on x86. Oh, well, at least the sign propagation does not matter - 160 * i_block would have to be negative in the very beginning, so we would not 161 * get there at all. 
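 *
 * Worked example (illustrative, not from the original source): with 1KiB
 * blocks, EXT2_ADDR_PER_BLOCK = 256 (ptrs_bits = 8) and EXT2_NDIR_BLOCKS = 12,
 * so logical block 300 falls into the doubly-indirect range:
 *	300 - 12 = 288, 288 - 256 = 32, and 32 < 256 * 256, hence
 *	offsets[] = { EXT2_DIND_BLOCK, 32 >> 8, 32 & 255 } = { 13, 0, 32 },
 *	the returned depth is 3, and *boundary = 256 - 1 - 32 = 223.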
162 */ 163 164 static int ext2_block_to_path(struct inode *inode, 165 long i_block, int offsets[4], int *boundary) 166 { 167 int ptrs = EXT2_ADDR_PER_BLOCK(inode->i_sb); 168 int ptrs_bits = EXT2_ADDR_PER_BLOCK_BITS(inode->i_sb); 169 const long direct_blocks = EXT2_NDIR_BLOCKS, 170 indirect_blocks = ptrs, 171 double_blocks = (1 << (ptrs_bits * 2)); 172 int n = 0; 173 int final = 0; 174 175 if (i_block < 0) { 176 ext2_msg(inode->i_sb, KERN_WARNING, 177 "warning: %s: block < 0", __func__); 178 } else if (i_block < direct_blocks) { 179 offsets[n++] = i_block; 180 final = direct_blocks; 181 } else if ( (i_block -= direct_blocks) < indirect_blocks) { 182 offsets[n++] = EXT2_IND_BLOCK; 183 offsets[n++] = i_block; 184 final = ptrs; 185 } else if ((i_block -= indirect_blocks) < double_blocks) { 186 offsets[n++] = EXT2_DIND_BLOCK; 187 offsets[n++] = i_block >> ptrs_bits; 188 offsets[n++] = i_block & (ptrs - 1); 189 final = ptrs; 190 } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) { 191 offsets[n++] = EXT2_TIND_BLOCK; 192 offsets[n++] = i_block >> (ptrs_bits * 2); 193 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1); 194 offsets[n++] = i_block & (ptrs - 1); 195 final = ptrs; 196 } else { 197 ext2_msg(inode->i_sb, KERN_WARNING, 198 "warning: %s: block is too big", __func__); 199 } 200 if (boundary) 201 *boundary = final - 1 - (i_block & (ptrs - 1)); 202 203 return n; 204 } 205 206 /** 207 * ext2_get_branch - read the chain of indirect blocks leading to data 208 * @inode: inode in question 209 * @depth: depth of the chain (1 - direct pointer, etc.) 210 * @offsets: offsets of pointers in inode/indirect blocks 211 * @chain: place to store the result 212 * @err: here we store the error value 213 * 214 * Function fills the array of triples <key, p, bh> and returns %NULL 215 * if everything went OK or the pointer to the last filled triple 216 * (incomplete one) otherwise. Upon the return chain[i].key contains 217 * the number of (i+1)-th block in the chain (as it is stored in memory, 218 * i.e. little-endian 32-bit), chain[i].p contains the address of that 219 * number (it points into struct inode for i==0 and into the bh->b_data 220 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect 221 * block for i>0 and NULL for i==0. In other words, it holds the block 222 * numbers of the chain, addresses they were taken from (and where we can 223 * verify that chain did not change) and buffer_heads hosting these 224 * numbers. 225 * 226 * Function stops when it stumbles upon zero pointer (absent block) 227 * (pointer to last triple returned, *@err == 0) 228 * or when it gets an IO error reading an indirect block 229 * (ditto, *@err == -EIO) 230 * or when it notices that chain had been changed while it was reading 231 * (ditto, *@err == -EAGAIN) 232 * or when it reads all @depth-1 indirect blocks successfully and finds 233 * the whole chain, all way to the data (returns %NULL, *err == 0). 
234 */ 235 static Indirect *ext2_get_branch(struct inode *inode, 236 int depth, 237 int *offsets, 238 Indirect chain[4], 239 int *err) 240 { 241 struct super_block *sb = inode->i_sb; 242 Indirect *p = chain; 243 struct buffer_head *bh; 244 245 *err = 0; 246 /* i_data is not going away, no lock needed */ 247 add_chain (chain, NULL, EXT2_I(inode)->i_data + *offsets); 248 if (!p->key) 249 goto no_block; 250 while (--depth) { 251 bh = sb_bread(sb, le32_to_cpu(p->key)); 252 if (!bh) 253 goto failure; 254 read_lock(&EXT2_I(inode)->i_meta_lock); 255 if (!verify_chain(chain, p)) 256 goto changed; 257 add_chain(++p, bh, (__le32*)bh->b_data + *++offsets); 258 read_unlock(&EXT2_I(inode)->i_meta_lock); 259 if (!p->key) 260 goto no_block; 261 } 262 return NULL; 263 264 changed: 265 read_unlock(&EXT2_I(inode)->i_meta_lock); 266 brelse(bh); 267 *err = -EAGAIN; 268 goto no_block; 269 failure: 270 *err = -EIO; 271 no_block: 272 return p; 273 } 274 275 /** 276 * ext2_find_near - find a place for allocation with sufficient locality 277 * @inode: owner 278 * @ind: descriptor of indirect block. 279 * 280 * This function returns the preferred place for block allocation. 281 * It is used when heuristic for sequential allocation fails. 282 * Rules are: 283 * + if there is a block to the left of our position - allocate near it. 284 * + if pointer will live in indirect block - allocate near that block. 285 * + if pointer will live in inode - allocate in the same cylinder group. 286 * 287 * In the latter case we colour the starting block by the callers PID to 288 * prevent it from clashing with concurrent allocations for a different inode 289 * in the same block group. The PID is used here so that functionally related 290 * files will be close-by on-disk. 291 * 292 * Caller must make sure that @ind is valid and will stay that way. 293 */ 294 295 static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind) 296 { 297 struct ext2_inode_info *ei = EXT2_I(inode); 298 __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data; 299 __le32 *p; 300 ext2_fsblk_t bg_start; 301 ext2_fsblk_t colour; 302 303 /* Try to find previous block */ 304 for (p = ind->p - 1; p >= start; p--) 305 if (*p) 306 return le32_to_cpu(*p); 307 308 /* No such thing, so let's try location of indirect block */ 309 if (ind->bh) 310 return ind->bh->b_blocknr; 311 312 /* 313 * It is going to be referred from inode itself? OK, just put it into 314 * the same cylinder group then. 315 */ 316 bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group); 317 colour = (current->pid % 16) * 318 (EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16); 319 return bg_start + colour; 320 } 321 322 /** 323 * ext2_find_goal - find a preferred place for allocation. 324 * @inode: owner 325 * @block: block we want 326 * @partial: pointer to the last triple within a chain 327 * 328 * Returns preferred place for a block (the goal). 329 */ 330 331 static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block, 332 Indirect *partial) 333 { 334 struct ext2_block_alloc_info *block_i; 335 336 block_i = EXT2_I(inode)->i_block_alloc_info; 337 338 /* 339 * try the heuristic for sequential allocation, 340 * failing that at least try to get decent locality. 
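	 *
	 * For instance (illustrative, not part of the original comment): if
	 * the last allocation mapped logical block N to physical block P, a
	 * request for logical block N+1 is aimed at physical block P+1,
	 * which is what the last_alloc_* check below implements.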
	 */
	if (block_i && (block == block_i->last_alloc_logical_block + 1)
	    && (block_i->last_alloc_physical_block != 0)) {
		return block_i->last_alloc_physical_block + 1;
	}

	return ext2_find_near(inode, partial);
}

/**
 * ext2_blks_to_allocate: Look up the block map and count the number
 * of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int
ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
		int blocks_to_boundary)
{
	unsigned long count = 0;

	/*
	 * Simple case: the [t,d]indirect block(s) on this path have not been
	 * allocated yet, so clearly the blocks on that path have not been
	 * allocated either.
	 */
	if (k > 0) {
		/* right now we don't handle cross boundary allocation */
		if (blks < blocks_to_boundary + 1)
			count += blks;
		else
			count += blocks_to_boundary + 1;
		return count;
	}

	count++;
	while (count < blks && count <= blocks_to_boundary
		&& le32_to_cpu(*(branch[0].p + count)) == 0) {
		count++;
	}
	return count;
}

/**
 * ext2_alloc_blocks: allocate multiple blocks needed for a branch
 * @indirect_blks: the number of blocks that need to be allocated for
 *	indirect blocks
 *
 * @new_blocks: on return it will store the new block numbers for
 *	the indirect blocks(if needed) and the first direct block,
 * @blks: on return it will store the total number of allocated
 *	direct blocks
 */
static int ext2_alloc_blocks(struct inode *inode,
			ext2_fsblk_t goal, int indirect_blks, int blks,
			ext2_fsblk_t new_blocks[4], int *err)
{
	int target, i;
	unsigned long count = 0;
	int index = 0;
	ext2_fsblk_t current_block = 0;
	int ret = 0;

	/*
	 * Here we try to allocate the requested multiple blocks at once,
	 * on a best-effort basis.
	 * To build a branch, we should allocate blocks for
	 * the indirect blocks(if not allocated yet), and at least
	 * the first direct block of this branch.  That's the
	 * minimum number of blocks that need to be allocated(required)
	 */
	target = blks + indirect_blks;

	while (1) {
		count = target;
		/* allocating blocks for indirect blocks and direct blocks */
		current_block = ext2_new_blocks(inode, goal, &count, err);
		if (*err)
			goto failed_out;

		target -= count;
		/* allocate blocks for indirect blocks */
		while (index < indirect_blks && count) {
			new_blocks[index++] = current_block++;
			count--;
		}

		if (count > 0)
			break;
	}

	/* save the new block number for the first direct block */
	new_blocks[index] = current_block;

	/* total number of blocks allocated for direct blocks */
	ret = count;
	*err = 0;
	return ret;
failed_out:
	for (i = 0; i < index; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	if (index)
		mark_inode_dirty(inode);
	return ret;
}

/**
 * ext2_alloc_branch - allocate and set up a chain of blocks.
 * @inode: owner
 * @indirect_blks: depth of the chain (number of blocks to allocate)
 * @blks: number of allocated direct blocks
 * @goal: preferred place for allocation
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates the needed indirect blocks and up to *@blks
 * data blocks, zeroes out all but the last one, links them into a chain
 * and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode. It stores the information about that chain in the branch[], in
 * the same format as ext2_get_branch() would do. We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key). Upon the exit we have the same
 * picture as after the successful ext2_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext2_alloc_blocks() (normally -ENOSPC). Otherwise we set the chain
 * as described above and return 0.
 */

static int ext2_alloc_branch(struct inode *inode,
			int indirect_blks, int *blks, ext2_fsblk_t goal,
			int *offsets, Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int i, n = 0;
	int err = 0;
	struct buffer_head *bh;
	int num;
	ext2_fsblk_t new_blocks[4];
	ext2_fsblk_t current_block;

	num = ext2_alloc_blocks(inode, goal, indirect_blks,
				*blks, new_blocks, &err);
	if (err)
		return err;

	branch[0].key = cpu_to_le32(new_blocks[0]);
	/*
	 * metadata blocks and data blocks are allocated.
	 */
	for (n = 1; n <= indirect_blks; n++) {
		/*
		 * Get buffer_head for parent block, zero it out
		 * and set the pointer to new one, then send
		 * parent to disk.
		 */
		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto failed;
		}
		branch[n].bh = bh;
		lock_buffer(bh);
		memset(bh->b_data, 0, blocksize);
		branch[n].p = (__le32 *) bh->b_data + offsets[n];
		branch[n].key = cpu_to_le32(new_blocks[n]);
		*branch[n].p = branch[n].key;
		if (n == indirect_blks) {
			current_block = new_blocks[n];
			/*
			 * End of chain, update the last new metablock of
			 * the chain to point to the newly allocated
			 * data block numbers
			 */
			for (i = 1; i < num; i++)
				*(branch[n].p + i) = cpu_to_le32(++current_block);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		/* We used to sync bh here if IS_SYNC(inode).
		 * But we now rely upon generic_write_sync()
		 * and b_inode_buffers.  But not for directories.
		 */
		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
			sync_dirty_buffer(bh);
	}
	*blks = num;
	return err;

failed:
	for (i = 1; i < n; i++)
		bforget(branch[i].bh);
	for (i = 0; i < indirect_blks; i++)
		ext2_free_blocks(inode, new_blocks[i], 1);
	ext2_free_blocks(inode, new_blocks[i], num);
	return err;
}

/**
 * ext2_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static void ext2_splice_branch(struct inode *inode,
			long block, Indirect *where, int num, int blks)
{
	int i;
	struct ext2_block_alloc_info *block_i;
	ext2_fsblk_t current_block;

	block_i = EXT2_I(inode)->i_block_alloc_info;

	/* XXX LOCKING probably should have i_meta_lock ?*/
	/* That's it */

	*where->p = where->key;

	/*
	 * Update the host buffer_head or inode to point to the just-allocated
	 * direct blocks
	 */
	if (num == 0 && blks > 1) {
		current_block = le32_to_cpu(where->key) + 1;
		for (i = 1; i < blks; i++)
			*(where->p + i) = cpu_to_le32(current_block++);
	}

	/*
	 * update the most recently allocated logical & physical block
	 * in i_block_alloc_info, to assist in finding the proper goal block
	 * for the next allocation
	 */
	if (block_i) {
		block_i->last_alloc_logical_block = block + blks - 1;
		block_i->last_alloc_physical_block =
				le32_to_cpu(where[num].key) + blks - 1;
	}

	/* We are done with atomic stuff, now do the rest of housekeeping */

	/* had we spliced it onto indirect block? */
	if (where->bh)
		mark_buffer_dirty_inode(where->bh, inode);

	inode->i_ctime = current_time(inode);
	mark_inode_dirty(inode);
}

/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
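 *
 * Callers below use the result in two ways: ext2_get_block() maps the first
 * returned block into a buffer_head and sets b_size to cover all mapped
 * blocks, while ext2_iomap_begin() (DAX path) translates it into an
 * IOMAP_MAPPED or IOMAP_HOLE extent.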
 */
static int ext2_get_blocks(struct inode *inode,
			   sector_t iblock, unsigned long maxblocks,
			   u32 *bno, bool *new, bool *boundary,
			   int create)
{
	int err;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	ext2_fsblk_t goal;
	int indirect_blks;
	int blocks_to_boundary = 0;
	int depth;
	struct ext2_inode_info *ei = EXT2_I(inode);
	int count = 0;
	ext2_fsblk_t first_block = 0;

	BUG_ON(maxblocks == 0);

	depth = ext2_block_to_path(inode, iblock, offsets, &blocks_to_boundary);

	if (depth == 0)
		return -EIO;

	partial = ext2_get_branch(inode, depth, offsets, chain, &err);
	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		first_block = le32_to_cpu(chain[depth - 1].key);
		count++;
		/* map more blocks */
		while (count < maxblocks && count <= blocks_to_boundary) {
			ext2_fsblk_t blk;

			if (!verify_chain(chain, chain + depth - 1)) {
				/*
				 * Indirect block might be removed by
				 * truncate while we were reading it.
				 * Handling of that case: forget what we've
				 * got now, go to reread.
				 */
				err = -EAGAIN;
				count = 0;
				partial = chain + depth - 1;
				break;
			}
			blk = le32_to_cpu(*(chain[depth-1].p + count));
			if (blk == first_block + count)
				count++;
			else
				break;
		}
		if (err != -EAGAIN)
			goto got_it;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO)
		goto cleanup;

	mutex_lock(&ei->truncate_mutex);
	/*
	 * If the indirect block is missing while we are reading
	 * the chain (ext2_get_branch() returns -EAGAIN), or
	 * if the chain has been changed after we grab the semaphore,
	 * (either because another process truncated this branch, or
	 * another get_block allocated this branch) re-grab the chain to see if
	 * the requested block has been allocated or not.
	 *
	 * Since we already block the truncate/other get_block
	 * at this point, we will have the current copy of the chain when we
	 * splice the branch into the tree.
	 */
	if (err == -EAGAIN || !verify_chain(chain, partial)) {
		while (partial > chain) {
			brelse(partial->bh);
			partial--;
		}
		partial = ext2_get_branch(inode, depth, offsets, chain, &err);
		if (!partial) {
			count++;
			mutex_unlock(&ei->truncate_mutex);
			goto got_it;
		}

		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	}

	/*
	 * Okay, we need to do block allocation.  Lazily initialize the block
	 * allocation info here if necessary
	 */
	if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
		ext2_init_block_alloc_info(inode);

	goal = ext2_find_goal(inode, iblock, partial);

	/* the number of blocks that need to be allocated for [d,t]indirect blocks */
	indirect_blks = (chain + depth) - partial - 1;
	/*
	 * Next look up the indirect map to count the total number of
	 * direct blocks to allocate for this branch.
	 */
	count = ext2_blks_to_allocate(partial, indirect_blks,
					maxblocks, blocks_to_boundary);
	/*
	 * XXX ????
Block out ext2_truncate while we alter the tree 733 */ 734 err = ext2_alloc_branch(inode, indirect_blks, &count, goal, 735 offsets + (partial - chain), partial); 736 737 if (err) { 738 mutex_unlock(&ei->truncate_mutex); 739 goto cleanup; 740 } 741 742 if (IS_DAX(inode)) { 743 /* 744 * We must unmap blocks before zeroing so that writeback cannot 745 * overwrite zeros with stale data from block device page cache. 746 */ 747 clean_bdev_aliases(inode->i_sb->s_bdev, 748 le32_to_cpu(chain[depth-1].key), 749 count); 750 /* 751 * block must be initialised before we put it in the tree 752 * so that it's not found by another thread before it's 753 * initialised 754 */ 755 err = sb_issue_zeroout(inode->i_sb, 756 le32_to_cpu(chain[depth-1].key), count, 757 GFP_NOFS); 758 if (err) { 759 mutex_unlock(&ei->truncate_mutex); 760 goto cleanup; 761 } 762 } 763 *new = true; 764 765 ext2_splice_branch(inode, iblock, partial, indirect_blks, count); 766 mutex_unlock(&ei->truncate_mutex); 767 got_it: 768 if (count > blocks_to_boundary) 769 *boundary = true; 770 err = count; 771 /* Clean up and exit */ 772 partial = chain + depth - 1; /* the whole chain */ 773 cleanup: 774 while (partial > chain) { 775 brelse(partial->bh); 776 partial--; 777 } 778 if (err > 0) 779 *bno = le32_to_cpu(chain[depth-1].key); 780 return err; 781 } 782 783 int ext2_get_block(struct inode *inode, sector_t iblock, 784 struct buffer_head *bh_result, int create) 785 { 786 unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; 787 bool new = false, boundary = false; 788 u32 bno; 789 int ret; 790 791 ret = ext2_get_blocks(inode, iblock, max_blocks, &bno, &new, &boundary, 792 create); 793 if (ret <= 0) 794 return ret; 795 796 map_bh(bh_result, inode->i_sb, bno); 797 bh_result->b_size = (ret << inode->i_blkbits); 798 if (new) 799 set_buffer_new(bh_result); 800 if (boundary) 801 set_buffer_boundary(bh_result); 802 return 0; 803 804 } 805 806 #ifdef CONFIG_FS_DAX 807 static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length, 808 unsigned flags, struct iomap *iomap, struct iomap *srcmap) 809 { 810 unsigned int blkbits = inode->i_blkbits; 811 unsigned long first_block = offset >> blkbits; 812 unsigned long max_blocks = (length + (1 << blkbits) - 1) >> blkbits; 813 struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb); 814 bool new = false, boundary = false; 815 u32 bno; 816 int ret; 817 818 ret = ext2_get_blocks(inode, first_block, max_blocks, 819 &bno, &new, &boundary, flags & IOMAP_WRITE); 820 if (ret < 0) 821 return ret; 822 823 iomap->flags = 0; 824 iomap->bdev = inode->i_sb->s_bdev; 825 iomap->offset = (u64)first_block << blkbits; 826 iomap->dax_dev = sbi->s_daxdev; 827 828 if (ret == 0) { 829 iomap->type = IOMAP_HOLE; 830 iomap->addr = IOMAP_NULL_ADDR; 831 iomap->length = 1 << blkbits; 832 } else { 833 iomap->type = IOMAP_MAPPED; 834 iomap->addr = (u64)bno << blkbits; 835 iomap->length = (u64)ret << blkbits; 836 iomap->flags |= IOMAP_F_MERGED; 837 } 838 839 if (new) 840 iomap->flags |= IOMAP_F_NEW; 841 return 0; 842 } 843 844 static int 845 ext2_iomap_end(struct inode *inode, loff_t offset, loff_t length, 846 ssize_t written, unsigned flags, struct iomap *iomap) 847 { 848 if (iomap->type == IOMAP_MAPPED && 849 written < length && 850 (flags & IOMAP_WRITE)) 851 ext2_write_failed(inode->i_mapping, offset + length); 852 return 0; 853 } 854 855 const struct iomap_ops ext2_iomap_ops = { 856 .iomap_begin = ext2_iomap_begin, 857 .iomap_end = ext2_iomap_end, 858 }; 859 #else 860 /* Define empty ops for !CONFIG_FS_DAX case to avoid 
ugly ifdefs */ 861 const struct iomap_ops ext2_iomap_ops; 862 #endif /* CONFIG_FS_DAX */ 863 864 int ext2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 865 u64 start, u64 len) 866 { 867 return generic_block_fiemap(inode, fieinfo, start, len, 868 ext2_get_block); 869 } 870 871 static int ext2_writepage(struct page *page, struct writeback_control *wbc) 872 { 873 return block_write_full_page(page, ext2_get_block, wbc); 874 } 875 876 static int ext2_readpage(struct file *file, struct page *page) 877 { 878 return mpage_readpage(page, ext2_get_block); 879 } 880 881 static void ext2_readahead(struct readahead_control *rac) 882 { 883 mpage_readahead(rac, ext2_get_block); 884 } 885 886 static int 887 ext2_write_begin(struct file *file, struct address_space *mapping, 888 loff_t pos, unsigned len, unsigned flags, 889 struct page **pagep, void **fsdata) 890 { 891 int ret; 892 893 ret = block_write_begin(mapping, pos, len, flags, pagep, 894 ext2_get_block); 895 if (ret < 0) 896 ext2_write_failed(mapping, pos + len); 897 return ret; 898 } 899 900 static int ext2_write_end(struct file *file, struct address_space *mapping, 901 loff_t pos, unsigned len, unsigned copied, 902 struct page *page, void *fsdata) 903 { 904 int ret; 905 906 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); 907 if (ret < len) 908 ext2_write_failed(mapping, pos + len); 909 return ret; 910 } 911 912 static int 913 ext2_nobh_write_begin(struct file *file, struct address_space *mapping, 914 loff_t pos, unsigned len, unsigned flags, 915 struct page **pagep, void **fsdata) 916 { 917 int ret; 918 919 ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata, 920 ext2_get_block); 921 if (ret < 0) 922 ext2_write_failed(mapping, pos + len); 923 return ret; 924 } 925 926 static int ext2_nobh_writepage(struct page *page, 927 struct writeback_control *wbc) 928 { 929 return nobh_writepage(page, ext2_get_block, wbc); 930 } 931 932 static sector_t ext2_bmap(struct address_space *mapping, sector_t block) 933 { 934 return generic_block_bmap(mapping,block,ext2_get_block); 935 } 936 937 static ssize_t 938 ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter) 939 { 940 struct file *file = iocb->ki_filp; 941 struct address_space *mapping = file->f_mapping; 942 struct inode *inode = mapping->host; 943 size_t count = iov_iter_count(iter); 944 loff_t offset = iocb->ki_pos; 945 ssize_t ret; 946 947 ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block); 948 if (ret < 0 && iov_iter_rw(iter) == WRITE) 949 ext2_write_failed(mapping, offset + count); 950 return ret; 951 } 952 953 static int 954 ext2_writepages(struct address_space *mapping, struct writeback_control *wbc) 955 { 956 return mpage_writepages(mapping, wbc, ext2_get_block); 957 } 958 959 static int 960 ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc) 961 { 962 struct ext2_sb_info *sbi = EXT2_SB(mapping->host->i_sb); 963 964 return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc); 965 } 966 967 const struct address_space_operations ext2_aops = { 968 .readpage = ext2_readpage, 969 .readahead = ext2_readahead, 970 .writepage = ext2_writepage, 971 .write_begin = ext2_write_begin, 972 .write_end = ext2_write_end, 973 .bmap = ext2_bmap, 974 .direct_IO = ext2_direct_IO, 975 .writepages = ext2_writepages, 976 .migratepage = buffer_migrate_page, 977 .is_partially_uptodate = block_is_partially_uptodate, 978 .error_remove_page = generic_error_remove_page, 979 }; 980 981 const struct address_space_operations 
ext2_nobh_aops = { 982 .readpage = ext2_readpage, 983 .readahead = ext2_readahead, 984 .writepage = ext2_nobh_writepage, 985 .write_begin = ext2_nobh_write_begin, 986 .write_end = nobh_write_end, 987 .bmap = ext2_bmap, 988 .direct_IO = ext2_direct_IO, 989 .writepages = ext2_writepages, 990 .migratepage = buffer_migrate_page, 991 .error_remove_page = generic_error_remove_page, 992 }; 993 994 static const struct address_space_operations ext2_dax_aops = { 995 .writepages = ext2_dax_writepages, 996 .direct_IO = noop_direct_IO, 997 .set_page_dirty = noop_set_page_dirty, 998 .invalidatepage = noop_invalidatepage, 999 }; 1000 1001 /* 1002 * Probably it should be a library function... search for first non-zero word 1003 * or memcmp with zero_page, whatever is better for particular architecture. 1004 * Linus? 1005 */ 1006 static inline int all_zeroes(__le32 *p, __le32 *q) 1007 { 1008 while (p < q) 1009 if (*p++) 1010 return 0; 1011 return 1; 1012 } 1013 1014 /** 1015 * ext2_find_shared - find the indirect blocks for partial truncation. 1016 * @inode: inode in question 1017 * @depth: depth of the affected branch 1018 * @offsets: offsets of pointers in that branch (see ext2_block_to_path) 1019 * @chain: place to store the pointers to partial indirect blocks 1020 * @top: place to the (detached) top of branch 1021 * 1022 * This is a helper function used by ext2_truncate(). 1023 * 1024 * When we do truncate() we may have to clean the ends of several indirect 1025 * blocks but leave the blocks themselves alive. Block is partially 1026 * truncated if some data below the new i_size is referred from it (and 1027 * it is on the path to the first completely truncated data block, indeed). 1028 * We have to free the top of that path along with everything to the right 1029 * of the path. Since no allocation past the truncation point is possible 1030 * until ext2_truncate() finishes, we may safely do the latter, but top 1031 * of branch may require special attention - pageout below the truncation 1032 * point might try to populate it. 1033 * 1034 * We atomically detach the top of branch from the tree, store the block 1035 * number of its root in *@top, pointers to buffer_heads of partially 1036 * truncated blocks - in @chain[].bh and pointers to their last elements 1037 * that should not be removed - in @chain[].p. Return value is the pointer 1038 * to last filled element of @chain. 1039 * 1040 * The work left to caller to do the actual freeing of subtrees: 1041 * a) free the subtree starting from *@top 1042 * b) free the subtrees whose roots are stored in 1043 * (@chain[i].p+1 .. end of @chain[i].bh->b_data) 1044 * c) free the subtrees growing from the inode past the @chain[0].p 1045 * (no partially truncated stuff there). 1046 */ 1047 1048 static Indirect *ext2_find_shared(struct inode *inode, 1049 int depth, 1050 int offsets[4], 1051 Indirect chain[4], 1052 __le32 *top) 1053 { 1054 Indirect *partial, *p; 1055 int k, err; 1056 1057 *top = 0; 1058 for (k = depth; k > 1 && !offsets[k-1]; k--) 1059 ; 1060 partial = ext2_get_branch(inode, k, offsets, chain, &err); 1061 if (!partial) 1062 partial = chain + k-1; 1063 /* 1064 * If the branch acquired continuation since we've looked at it - 1065 * fine, it should all survive and (new) top doesn't belong to us. 
1066 */ 1067 write_lock(&EXT2_I(inode)->i_meta_lock); 1068 if (!partial->key && *partial->p) { 1069 write_unlock(&EXT2_I(inode)->i_meta_lock); 1070 goto no_top; 1071 } 1072 for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--) 1073 ; 1074 /* 1075 * OK, we've found the last block that must survive. The rest of our 1076 * branch should be detached before unlocking. However, if that rest 1077 * of branch is all ours and does not grow immediately from the inode 1078 * it's easier to cheat and just decrement partial->p. 1079 */ 1080 if (p == chain + k - 1 && p > chain) { 1081 p->p--; 1082 } else { 1083 *top = *p->p; 1084 *p->p = 0; 1085 } 1086 write_unlock(&EXT2_I(inode)->i_meta_lock); 1087 1088 while(partial > p) 1089 { 1090 brelse(partial->bh); 1091 partial--; 1092 } 1093 no_top: 1094 return partial; 1095 } 1096 1097 /** 1098 * ext2_free_data - free a list of data blocks 1099 * @inode: inode we are dealing with 1100 * @p: array of block numbers 1101 * @q: points immediately past the end of array 1102 * 1103 * We are freeing all blocks referred from that array (numbers are 1104 * stored as little-endian 32-bit) and updating @inode->i_blocks 1105 * appropriately. 1106 */ 1107 static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q) 1108 { 1109 unsigned long block_to_free = 0, count = 0; 1110 unsigned long nr; 1111 1112 for ( ; p < q ; p++) { 1113 nr = le32_to_cpu(*p); 1114 if (nr) { 1115 *p = 0; 1116 /* accumulate blocks to free if they're contiguous */ 1117 if (count == 0) 1118 goto free_this; 1119 else if (block_to_free == nr - count) 1120 count++; 1121 else { 1122 ext2_free_blocks (inode, block_to_free, count); 1123 mark_inode_dirty(inode); 1124 free_this: 1125 block_to_free = nr; 1126 count = 1; 1127 } 1128 } 1129 } 1130 if (count > 0) { 1131 ext2_free_blocks (inode, block_to_free, count); 1132 mark_inode_dirty(inode); 1133 } 1134 } 1135 1136 /** 1137 * ext2_free_branches - free an array of branches 1138 * @inode: inode we are dealing with 1139 * @p: array of block numbers 1140 * @q: pointer immediately past the end of array 1141 * @depth: depth of the branches to free 1142 * 1143 * We are freeing all blocks referred from these branches (numbers are 1144 * stored as little-endian 32-bit) and updating @inode->i_blocks 1145 * appropriately. 1146 */ 1147 static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth) 1148 { 1149 struct buffer_head * bh; 1150 unsigned long nr; 1151 1152 if (depth--) { 1153 int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb); 1154 for ( ; p < q ; p++) { 1155 nr = le32_to_cpu(*p); 1156 if (!nr) 1157 continue; 1158 *p = 0; 1159 bh = sb_bread(inode->i_sb, nr); 1160 /* 1161 * A read failure? Report error and clear slot 1162 * (should be rare). 
1163 */ 1164 if (!bh) { 1165 ext2_error(inode->i_sb, "ext2_free_branches", 1166 "Read failure, inode=%ld, block=%ld", 1167 inode->i_ino, nr); 1168 continue; 1169 } 1170 ext2_free_branches(inode, 1171 (__le32*)bh->b_data, 1172 (__le32*)bh->b_data + addr_per_block, 1173 depth); 1174 bforget(bh); 1175 ext2_free_blocks(inode, nr, 1); 1176 mark_inode_dirty(inode); 1177 } 1178 } else 1179 ext2_free_data(inode, p, q); 1180 } 1181 1182 /* dax_sem must be held when calling this function */ 1183 static void __ext2_truncate_blocks(struct inode *inode, loff_t offset) 1184 { 1185 __le32 *i_data = EXT2_I(inode)->i_data; 1186 struct ext2_inode_info *ei = EXT2_I(inode); 1187 int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb); 1188 int offsets[4]; 1189 Indirect chain[4]; 1190 Indirect *partial; 1191 __le32 nr = 0; 1192 int n; 1193 long iblock; 1194 unsigned blocksize; 1195 blocksize = inode->i_sb->s_blocksize; 1196 iblock = (offset + blocksize-1) >> EXT2_BLOCK_SIZE_BITS(inode->i_sb); 1197 1198 #ifdef CONFIG_FS_DAX 1199 WARN_ON(!rwsem_is_locked(&ei->dax_sem)); 1200 #endif 1201 1202 n = ext2_block_to_path(inode, iblock, offsets, NULL); 1203 if (n == 0) 1204 return; 1205 1206 /* 1207 * From here we block out all ext2_get_block() callers who want to 1208 * modify the block allocation tree. 1209 */ 1210 mutex_lock(&ei->truncate_mutex); 1211 1212 if (n == 1) { 1213 ext2_free_data(inode, i_data+offsets[0], 1214 i_data + EXT2_NDIR_BLOCKS); 1215 goto do_indirects; 1216 } 1217 1218 partial = ext2_find_shared(inode, n, offsets, chain, &nr); 1219 /* Kill the top of shared branch (already detached) */ 1220 if (nr) { 1221 if (partial == chain) 1222 mark_inode_dirty(inode); 1223 else 1224 mark_buffer_dirty_inode(partial->bh, inode); 1225 ext2_free_branches(inode, &nr, &nr+1, (chain+n-1) - partial); 1226 } 1227 /* Clear the ends of indirect blocks on the shared branch */ 1228 while (partial > chain) { 1229 ext2_free_branches(inode, 1230 partial->p + 1, 1231 (__le32*)partial->bh->b_data+addr_per_block, 1232 (chain+n-1) - partial); 1233 mark_buffer_dirty_inode(partial->bh, inode); 1234 brelse (partial->bh); 1235 partial--; 1236 } 1237 do_indirects: 1238 /* Kill the remaining (whole) subtrees */ 1239 switch (offsets[0]) { 1240 default: 1241 nr = i_data[EXT2_IND_BLOCK]; 1242 if (nr) { 1243 i_data[EXT2_IND_BLOCK] = 0; 1244 mark_inode_dirty(inode); 1245 ext2_free_branches(inode, &nr, &nr+1, 1); 1246 } 1247 /* fall through */ 1248 case EXT2_IND_BLOCK: 1249 nr = i_data[EXT2_DIND_BLOCK]; 1250 if (nr) { 1251 i_data[EXT2_DIND_BLOCK] = 0; 1252 mark_inode_dirty(inode); 1253 ext2_free_branches(inode, &nr, &nr+1, 2); 1254 } 1255 /* fall through */ 1256 case EXT2_DIND_BLOCK: 1257 nr = i_data[EXT2_TIND_BLOCK]; 1258 if (nr) { 1259 i_data[EXT2_TIND_BLOCK] = 0; 1260 mark_inode_dirty(inode); 1261 ext2_free_branches(inode, &nr, &nr+1, 3); 1262 } 1263 case EXT2_TIND_BLOCK: 1264 ; 1265 } 1266 1267 ext2_discard_reservation(inode); 1268 1269 mutex_unlock(&ei->truncate_mutex); 1270 } 1271 1272 static void ext2_truncate_blocks(struct inode *inode, loff_t offset) 1273 { 1274 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1275 S_ISLNK(inode->i_mode))) 1276 return; 1277 if (ext2_inode_is_fast_symlink(inode)) 1278 return; 1279 1280 dax_sem_down_write(EXT2_I(inode)); 1281 __ext2_truncate_blocks(inode, offset); 1282 dax_sem_up_write(EXT2_I(inode)); 1283 } 1284 1285 static int ext2_setsize(struct inode *inode, loff_t newsize) 1286 { 1287 int error; 1288 1289 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1290 
S_ISLNK(inode->i_mode))) 1291 return -EINVAL; 1292 if (ext2_inode_is_fast_symlink(inode)) 1293 return -EINVAL; 1294 if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 1295 return -EPERM; 1296 1297 inode_dio_wait(inode); 1298 1299 if (IS_DAX(inode)) { 1300 error = iomap_zero_range(inode, newsize, 1301 PAGE_ALIGN(newsize) - newsize, NULL, 1302 &ext2_iomap_ops); 1303 } else if (test_opt(inode->i_sb, NOBH)) 1304 error = nobh_truncate_page(inode->i_mapping, 1305 newsize, ext2_get_block); 1306 else 1307 error = block_truncate_page(inode->i_mapping, 1308 newsize, ext2_get_block); 1309 if (error) 1310 return error; 1311 1312 dax_sem_down_write(EXT2_I(inode)); 1313 truncate_setsize(inode, newsize); 1314 __ext2_truncate_blocks(inode, newsize); 1315 dax_sem_up_write(EXT2_I(inode)); 1316 1317 inode->i_mtime = inode->i_ctime = current_time(inode); 1318 if (inode_needs_sync(inode)) { 1319 sync_mapping_buffers(inode->i_mapping); 1320 sync_inode_metadata(inode, 1); 1321 } else { 1322 mark_inode_dirty(inode); 1323 } 1324 1325 return 0; 1326 } 1327 1328 static struct ext2_inode *ext2_get_inode(struct super_block *sb, ino_t ino, 1329 struct buffer_head **p) 1330 { 1331 struct buffer_head * bh; 1332 unsigned long block_group; 1333 unsigned long block; 1334 unsigned long offset; 1335 struct ext2_group_desc * gdp; 1336 1337 *p = NULL; 1338 if ((ino != EXT2_ROOT_INO && ino < EXT2_FIRST_INO(sb)) || 1339 ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count)) 1340 goto Einval; 1341 1342 block_group = (ino - 1) / EXT2_INODES_PER_GROUP(sb); 1343 gdp = ext2_get_group_desc(sb, block_group, NULL); 1344 if (!gdp) 1345 goto Egdp; 1346 /* 1347 * Figure out the offset within the block group inode table 1348 */ 1349 offset = ((ino - 1) % EXT2_INODES_PER_GROUP(sb)) * EXT2_INODE_SIZE(sb); 1350 block = le32_to_cpu(gdp->bg_inode_table) + 1351 (offset >> EXT2_BLOCK_SIZE_BITS(sb)); 1352 if (!(bh = sb_bread(sb, block))) 1353 goto Eio; 1354 1355 *p = bh; 1356 offset &= (EXT2_BLOCK_SIZE(sb) - 1); 1357 return (struct ext2_inode *) (bh->b_data + offset); 1358 1359 Einval: 1360 ext2_error(sb, "ext2_get_inode", "bad inode number: %lu", 1361 (unsigned long) ino); 1362 return ERR_PTR(-EINVAL); 1363 Eio: 1364 ext2_error(sb, "ext2_get_inode", 1365 "unable to read inode block - inode=%lu, block=%lu", 1366 (unsigned long) ino, block); 1367 Egdp: 1368 return ERR_PTR(-EIO); 1369 } 1370 1371 void ext2_set_inode_flags(struct inode *inode) 1372 { 1373 unsigned int flags = EXT2_I(inode)->i_flags; 1374 1375 inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | 1376 S_DIRSYNC | S_DAX); 1377 if (flags & EXT2_SYNC_FL) 1378 inode->i_flags |= S_SYNC; 1379 if (flags & EXT2_APPEND_FL) 1380 inode->i_flags |= S_APPEND; 1381 if (flags & EXT2_IMMUTABLE_FL) 1382 inode->i_flags |= S_IMMUTABLE; 1383 if (flags & EXT2_NOATIME_FL) 1384 inode->i_flags |= S_NOATIME; 1385 if (flags & EXT2_DIRSYNC_FL) 1386 inode->i_flags |= S_DIRSYNC; 1387 if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode)) 1388 inode->i_flags |= S_DAX; 1389 } 1390 1391 void ext2_set_file_ops(struct inode *inode) 1392 { 1393 inode->i_op = &ext2_file_inode_operations; 1394 inode->i_fop = &ext2_file_operations; 1395 if (IS_DAX(inode)) 1396 inode->i_mapping->a_ops = &ext2_dax_aops; 1397 else if (test_opt(inode->i_sb, NOBH)) 1398 inode->i_mapping->a_ops = &ext2_nobh_aops; 1399 else 1400 inode->i_mapping->a_ops = &ext2_aops; 1401 } 1402 1403 struct inode *ext2_iget (struct super_block *sb, unsigned long ino) 1404 { 1405 struct ext2_inode_info *ei; 1406 struct buffer_head * bh = NULL; 1407 struct 
ext2_inode *raw_inode; 1408 struct inode *inode; 1409 long ret = -EIO; 1410 int n; 1411 uid_t i_uid; 1412 gid_t i_gid; 1413 1414 inode = iget_locked(sb, ino); 1415 if (!inode) 1416 return ERR_PTR(-ENOMEM); 1417 if (!(inode->i_state & I_NEW)) 1418 return inode; 1419 1420 ei = EXT2_I(inode); 1421 ei->i_block_alloc_info = NULL; 1422 1423 raw_inode = ext2_get_inode(inode->i_sb, ino, &bh); 1424 if (IS_ERR(raw_inode)) { 1425 ret = PTR_ERR(raw_inode); 1426 goto bad_inode; 1427 } 1428 1429 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 1430 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 1431 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 1432 if (!(test_opt (inode->i_sb, NO_UID32))) { 1433 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16; 1434 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16; 1435 } 1436 i_uid_write(inode, i_uid); 1437 i_gid_write(inode, i_gid); 1438 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count)); 1439 inode->i_size = le32_to_cpu(raw_inode->i_size); 1440 inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime); 1441 inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime); 1442 inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime); 1443 inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = 0; 1444 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); 1445 /* We now have enough fields to check if the inode was active or not. 1446 * This is needed because nfsd might try to access dead inodes 1447 * the test is that same one that e2fsck uses 1448 * NeilBrown 1999oct15 1449 */ 1450 if (inode->i_nlink == 0 && (inode->i_mode == 0 || ei->i_dtime)) { 1451 /* this inode is deleted */ 1452 ret = -ESTALE; 1453 goto bad_inode; 1454 } 1455 inode->i_blocks = le32_to_cpu(raw_inode->i_blocks); 1456 ei->i_flags = le32_to_cpu(raw_inode->i_flags); 1457 ext2_set_inode_flags(inode); 1458 ei->i_faddr = le32_to_cpu(raw_inode->i_faddr); 1459 ei->i_frag_no = raw_inode->i_frag; 1460 ei->i_frag_size = raw_inode->i_fsize; 1461 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl); 1462 ei->i_dir_acl = 0; 1463 1464 if (ei->i_file_acl && 1465 !ext2_data_block_valid(EXT2_SB(sb), ei->i_file_acl, 1)) { 1466 ext2_error(sb, "ext2_iget", "bad extended attribute block %u", 1467 ei->i_file_acl); 1468 ret = -EFSCORRUPTED; 1469 goto bad_inode; 1470 } 1471 1472 if (S_ISREG(inode->i_mode)) 1473 inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32; 1474 else 1475 ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl); 1476 if (i_size_read(inode) < 0) { 1477 ret = -EFSCORRUPTED; 1478 goto bad_inode; 1479 } 1480 ei->i_dtime = 0; 1481 inode->i_generation = le32_to_cpu(raw_inode->i_generation); 1482 ei->i_state = 0; 1483 ei->i_block_group = (ino - 1) / EXT2_INODES_PER_GROUP(inode->i_sb); 1484 ei->i_dir_start_lookup = 0; 1485 1486 /* 1487 * NOTE! The in-memory inode i_data array is in little-endian order 1488 * even on big-endian machines: we do NOT byteswap the block numbers! 
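	 * (The raw __le32 values are copied verbatim; le32_to_cpu() is applied
	 * only at the point of use, as in ext2_get_branch() above.)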
	 */
	for (n = 0; n < EXT2_N_BLOCKS; n++)
		ei->i_data[n] = raw_inode->i_block[n];

	if (S_ISREG(inode->i_mode)) {
		ext2_set_file_ops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext2_dir_inode_operations;
		inode->i_fop = &ext2_dir_operations;
		if (test_opt(inode->i_sb, NOBH))
			inode->i_mapping->a_ops = &ext2_nobh_aops;
		else
			inode->i_mapping->a_ops = &ext2_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext2_inode_is_fast_symlink(inode)) {
			inode->i_link = (char *)ei->i_data;
			inode->i_op = &ext2_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext2_symlink_inode_operations;
			inode_nohighmem(inode);
			if (test_opt(inode->i_sb, NOBH))
				inode->i_mapping->a_ops = &ext2_nobh_aops;
			else
				inode->i_mapping->a_ops = &ext2_aops;
		}
	} else {
		inode->i_op = &ext2_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	}
	brelse(bh);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}

static int __ext2_write_inode(struct inode *inode, int do_sync)
{
	struct ext2_inode_info *ei = EXT2_I(inode);
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	uid_t uid = i_uid_read(inode);
	gid_t gid = i_gid_read(inode);
	struct buffer_head *bh;
	struct ext2_inode *raw_inode = ext2_get_inode(sb, ino, &bh);
	int n;
	int err = 0;

	if (IS_ERR(raw_inode))
		return -EIO;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ei->i_state & EXT2_STATE_NEW)
		memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
		/*
		 * Fix up interoperability with old kernels.
Otherwise, old inodes get 1561 * re-used with the upper 16 bits of the uid/gid intact 1562 */ 1563 if (!ei->i_dtime) { 1564 raw_inode->i_uid_high = cpu_to_le16(high_16_bits(uid)); 1565 raw_inode->i_gid_high = cpu_to_le16(high_16_bits(gid)); 1566 } else { 1567 raw_inode->i_uid_high = 0; 1568 raw_inode->i_gid_high = 0; 1569 } 1570 } else { 1571 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(uid)); 1572 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(gid)); 1573 raw_inode->i_uid_high = 0; 1574 raw_inode->i_gid_high = 0; 1575 } 1576 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); 1577 raw_inode->i_size = cpu_to_le32(inode->i_size); 1578 raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec); 1579 raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec); 1580 raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec); 1581 1582 raw_inode->i_blocks = cpu_to_le32(inode->i_blocks); 1583 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); 1584 raw_inode->i_flags = cpu_to_le32(ei->i_flags); 1585 raw_inode->i_faddr = cpu_to_le32(ei->i_faddr); 1586 raw_inode->i_frag = ei->i_frag_no; 1587 raw_inode->i_fsize = ei->i_frag_size; 1588 raw_inode->i_file_acl = cpu_to_le32(ei->i_file_acl); 1589 if (!S_ISREG(inode->i_mode)) 1590 raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl); 1591 else { 1592 raw_inode->i_size_high = cpu_to_le32(inode->i_size >> 32); 1593 if (inode->i_size > 0x7fffffffULL) { 1594 if (!EXT2_HAS_RO_COMPAT_FEATURE(sb, 1595 EXT2_FEATURE_RO_COMPAT_LARGE_FILE) || 1596 EXT2_SB(sb)->s_es->s_rev_level == 1597 cpu_to_le32(EXT2_GOOD_OLD_REV)) { 1598 /* If this is the first large file 1599 * created, add a flag to the superblock. 1600 */ 1601 spin_lock(&EXT2_SB(sb)->s_lock); 1602 ext2_update_dynamic_rev(sb); 1603 EXT2_SET_RO_COMPAT_FEATURE(sb, 1604 EXT2_FEATURE_RO_COMPAT_LARGE_FILE); 1605 spin_unlock(&EXT2_SB(sb)->s_lock); 1606 ext2_sync_super(sb, EXT2_SB(sb)->s_es, 1); 1607 } 1608 } 1609 } 1610 1611 raw_inode->i_generation = cpu_to_le32(inode->i_generation); 1612 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { 1613 if (old_valid_dev(inode->i_rdev)) { 1614 raw_inode->i_block[0] = 1615 cpu_to_le32(old_encode_dev(inode->i_rdev)); 1616 raw_inode->i_block[1] = 0; 1617 } else { 1618 raw_inode->i_block[0] = 0; 1619 raw_inode->i_block[1] = 1620 cpu_to_le32(new_encode_dev(inode->i_rdev)); 1621 raw_inode->i_block[2] = 0; 1622 } 1623 } else for (n = 0; n < EXT2_N_BLOCKS; n++) 1624 raw_inode->i_block[n] = ei->i_data[n]; 1625 mark_buffer_dirty(bh); 1626 if (do_sync) { 1627 sync_dirty_buffer(bh); 1628 if (buffer_req(bh) && !buffer_uptodate(bh)) { 1629 printk ("IO error syncing ext2 inode [%s:%08lx]\n", 1630 sb->s_id, (unsigned long) ino); 1631 err = -EIO; 1632 } 1633 } 1634 ei->i_state &= ~EXT2_STATE_NEW; 1635 brelse (bh); 1636 return err; 1637 } 1638 1639 int ext2_write_inode(struct inode *inode, struct writeback_control *wbc) 1640 { 1641 return __ext2_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL); 1642 } 1643 1644 int ext2_getattr(const struct path *path, struct kstat *stat, 1645 u32 request_mask, unsigned int query_flags) 1646 { 1647 struct inode *inode = d_inode(path->dentry); 1648 struct ext2_inode_info *ei = EXT2_I(inode); 1649 unsigned int flags; 1650 1651 flags = ei->i_flags & EXT2_FL_USER_VISIBLE; 1652 if (flags & EXT2_APPEND_FL) 1653 stat->attributes |= STATX_ATTR_APPEND; 1654 if (flags & EXT2_COMPR_FL) 1655 stat->attributes |= STATX_ATTR_COMPRESSED; 1656 if (flags & EXT2_IMMUTABLE_FL) 1657 stat->attributes |= STATX_ATTR_IMMUTABLE; 1658 if (flags & EXT2_NODUMP_FL) 1659 stat->attributes |= 
STATX_ATTR_NODUMP; 1660 stat->attributes_mask |= (STATX_ATTR_APPEND | 1661 STATX_ATTR_COMPRESSED | 1662 STATX_ATTR_ENCRYPTED | 1663 STATX_ATTR_IMMUTABLE | 1664 STATX_ATTR_NODUMP); 1665 1666 generic_fillattr(inode, stat); 1667 return 0; 1668 } 1669 1670 int ext2_setattr(struct dentry *dentry, struct iattr *iattr) 1671 { 1672 struct inode *inode = d_inode(dentry); 1673 int error; 1674 1675 error = setattr_prepare(dentry, iattr); 1676 if (error) 1677 return error; 1678 1679 if (is_quota_modification(inode, iattr)) { 1680 error = dquot_initialize(inode); 1681 if (error) 1682 return error; 1683 } 1684 if ((iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) || 1685 (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid))) { 1686 error = dquot_transfer(inode, iattr); 1687 if (error) 1688 return error; 1689 } 1690 if (iattr->ia_valid & ATTR_SIZE && iattr->ia_size != inode->i_size) { 1691 error = ext2_setsize(inode, iattr->ia_size); 1692 if (error) 1693 return error; 1694 } 1695 setattr_copy(inode, iattr); 1696 if (iattr->ia_valid & ATTR_MODE) 1697 error = posix_acl_chmod(inode, inode->i_mode); 1698 mark_inode_dirty(inode); 1699 1700 return error; 1701 } 1702