1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. 5 */ 6 7 #include <linux/spinlock.h> 8 #include <linux/completion.h> 9 #include <linux/buffer_head.h> 10 #include <linux/blkdev.h> 11 #include <linux/gfs2_ondisk.h> 12 #include <linux/crc32.h> 13 #include <linux/iomap.h> 14 #include <linux/ktime.h> 15 16 #include "gfs2.h" 17 #include "incore.h" 18 #include "bmap.h" 19 #include "glock.h" 20 #include "inode.h" 21 #include "meta_io.h" 22 #include "quota.h" 23 #include "rgrp.h" 24 #include "log.h" 25 #include "super.h" 26 #include "trans.h" 27 #include "dir.h" 28 #include "util.h" 29 #include "aops.h" 30 #include "trace_gfs2.h" 31 32 /* This doesn't need to be that large as max 64 bit pointers in a 4k 33 * block is 512, so __u16 is fine for that. It saves stack space to 34 * keep it small. 35 */ 36 struct metapath { 37 struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT]; 38 __u16 mp_list[GFS2_MAX_META_HEIGHT]; 39 int mp_fheight; /* find_metapath height */ 40 int mp_aheight; /* actual height (lookup height) */ 41 }; 42 43 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length); 44 45 /** 46 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page 47 * @ip: the inode 48 * @dibh: the dinode buffer 49 * @block: the block number that was allocated 50 * @page: The (optional) page. This is looked up if @page is NULL 51 * 52 * Returns: errno 53 */ 54 55 static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh, 56 u64 block, struct page *page) 57 { 58 struct inode *inode = &ip->i_inode; 59 60 if (!PageUptodate(page)) { 61 void *kaddr = kmap(page); 62 u64 dsize = i_size_read(inode); 63 64 if (dsize > gfs2_max_stuffed_size(ip)) 65 dsize = gfs2_max_stuffed_size(ip); 66 67 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); 68 memset(kaddr + dsize, 0, PAGE_SIZE - dsize); 69 kunmap(page); 70 71 SetPageUptodate(page); 72 } 73 74 if (gfs2_is_jdata(ip)) { 75 struct buffer_head *bh; 76 77 if (!page_has_buffers(page)) 78 create_empty_buffers(page, BIT(inode->i_blkbits), 79 BIT(BH_Uptodate)); 80 81 bh = page_buffers(page); 82 if (!buffer_mapped(bh)) 83 map_bh(bh, inode->i_sb, block); 84 85 set_buffer_uptodate(bh); 86 gfs2_trans_add_data(ip->i_gl, bh); 87 } else { 88 set_page_dirty(page); 89 gfs2_ordered_add_inode(ip); 90 } 91 92 return 0; 93 } 94 95 static int __gfs2_unstuff_inode(struct gfs2_inode *ip, struct page *page) 96 { 97 struct buffer_head *bh, *dibh; 98 struct gfs2_dinode *di; 99 u64 block = 0; 100 int isdir = gfs2_is_dir(ip); 101 int error; 102 103 error = gfs2_meta_inode_buffer(ip, &dibh); 104 if (error) 105 return error; 106 107 if (i_size_read(&ip->i_inode)) { 108 /* Get a free block, fill it with the stuffed data, 109 and write it out to disk */ 110 111 unsigned int n = 1; 112 error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL); 113 if (error) 114 goto out_brelse; 115 if (isdir) { 116 gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1); 117 error = gfs2_dir_get_new_buffer(ip, block, &bh); 118 if (error) 119 goto out_brelse; 120 gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header), 121 dibh, sizeof(struct gfs2_dinode)); 122 brelse(bh); 123 } else { 124 error = gfs2_unstuffer_page(ip, dibh, block, page); 125 if (error) 126 goto out_brelse; 127 } 128 } 129 130 /* Set up the pointer to the new block */ 131 132 gfs2_trans_add_meta(ip->i_gl, dibh); 133 di = (struct gfs2_dinode *)dibh->b_data; 134 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); 135 136 if (i_size_read(&ip->i_inode)) { 137 *(__be64 *)(di + 1) = cpu_to_be64(block); 138 gfs2_add_inode_blocks(&ip->i_inode, 1); 139 di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); 140 } 141 142 ip->i_height = 1; 143 di->di_height = cpu_to_be16(1); 144 145 out_brelse: 146 brelse(dibh); 147 return error; 148 } 149 150 /** 151 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big 152 * @ip: The GFS2 inode to unstuff 153 * 154 * This routine unstuffs a dinode and returns it to a "normal" state such 155 * that the height can be grown in the traditional way. 156 * 157 * Returns: errno 158 */ 159 160 int gfs2_unstuff_dinode(struct gfs2_inode *ip) 161 { 162 struct inode *inode = &ip->i_inode; 163 struct page *page; 164 int error; 165 166 down_write(&ip->i_rw_mutex); 167 page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS); 168 error = -ENOMEM; 169 if (!page) 170 goto out; 171 error = __gfs2_unstuff_inode(ip, page); 172 unlock_page(page); 173 put_page(page); 174 out: 175 up_write(&ip->i_rw_mutex); 176 return error; 177 } 178 179 /** 180 * find_metapath - Find path through the metadata tree 181 * @sdp: The superblock 182 * @block: The disk block to look up 183 * @mp: The metapath to return the result in 184 * @height: The pre-calculated height of the metadata tree 185 * 186 * This routine returns a struct metapath structure that defines a path 187 * through the metadata of inode "ip" to get to block "block". 188 * 189 * Example: 190 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a 191 * filesystem with a blocksize of 4096. 192 * 193 * find_metapath() would return a struct metapath structure set to: 194 * mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165. 195 * 196 * That means that in order to get to the block containing the byte at 197 * offset 101342453, we would load the indirect block pointed to by pointer 198 * 0 in the dinode. We would then load the indirect block pointed to by 199 * pointer 48 in that indirect block. We would then load the data block 200 * pointed to by pointer 165 in that indirect block. 201 * 202 * ---------------------------------------- 203 * | Dinode | | 204 * | | 4| 205 * | |0 1 2 3 4 5 9| 206 * | | 6| 207 * ---------------------------------------- 208 * | 209 * | 210 * V 211 * ---------------------------------------- 212 * | Indirect Block | 213 * | 5| 214 * | 4 4 4 4 4 5 5 1| 215 * |0 5 6 7 8 9 0 1 2| 216 * ---------------------------------------- 217 * | 218 * | 219 * V 220 * ---------------------------------------- 221 * | Indirect Block | 222 * | 1 1 1 1 1 5| 223 * | 6 6 6 6 6 1| 224 * |0 3 4 5 6 7 2| 225 * ---------------------------------------- 226 * | 227 * | 228 * V 229 * ---------------------------------------- 230 * | Data block containing offset | 231 * | 101342453 | 232 * | | 233 * | | 234 * ---------------------------------------- 235 * 236 */ 237 238 static void find_metapath(const struct gfs2_sbd *sdp, u64 block, 239 struct metapath *mp, unsigned int height) 240 { 241 unsigned int i; 242 243 mp->mp_fheight = height; 244 for (i = height; i--;) 245 mp->mp_list[i] = do_div(block, sdp->sd_inptrs); 246 } 247 248 static inline unsigned int metapath_branch_start(const struct metapath *mp) 249 { 250 if (mp->mp_list[0] == 0) 251 return 2; 252 return 1; 253 } 254 255 /** 256 * metaptr1 - Return the first possible metadata pointer in a metapath buffer 257 * @height: The metadata height (0 = dinode) 258 * @mp: The metapath 259 */ 260 static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp) 261 { 262 struct buffer_head *bh = mp->mp_bh[height]; 263 if (height == 0) 264 return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode))); 265 return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header))); 266 } 267 268 /** 269 * metapointer - Return pointer to start of metadata in a buffer 270 * @height: The metadata height (0 = dinode) 271 * @mp: The metapath 272 * 273 * Return a pointer to the block number of the next height of the metadata 274 * tree given a buffer containing the pointer to the current height of the 275 * metadata tree. 276 */ 277 278 static inline __be64 *metapointer(unsigned int height, const struct metapath *mp) 279 { 280 __be64 *p = metaptr1(height, mp); 281 return p + mp->mp_list[height]; 282 } 283 284 static inline const __be64 *metaend(unsigned int height, const struct metapath *mp) 285 { 286 const struct buffer_head *bh = mp->mp_bh[height]; 287 return (const __be64 *)(bh->b_data + bh->b_size); 288 } 289 290 static void clone_metapath(struct metapath *clone, struct metapath *mp) 291 { 292 unsigned int hgt; 293 294 *clone = *mp; 295 for (hgt = 0; hgt < mp->mp_aheight; hgt++) 296 get_bh(clone->mp_bh[hgt]); 297 } 298 299 static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end) 300 { 301 const __be64 *t; 302 303 for (t = start; t < end; t++) { 304 struct buffer_head *rabh; 305 306 if (!*t) 307 continue; 308 309 rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE); 310 if (trylock_buffer(rabh)) { 311 if (!buffer_uptodate(rabh)) { 312 rabh->b_end_io = end_buffer_read_sync; 313 submit_bh(REQ_OP_READ, 314 REQ_RAHEAD | REQ_META | REQ_PRIO, 315 rabh); 316 continue; 317 } 318 unlock_buffer(rabh); 319 } 320 brelse(rabh); 321 } 322 } 323 324 static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, 325 unsigned int x, unsigned int h) 326 { 327 for (; x < h; x++) { 328 __be64 *ptr = metapointer(x, mp); 329 u64 dblock = be64_to_cpu(*ptr); 330 int ret; 331 332 if (!dblock) 333 break; 334 ret = gfs2_meta_buffer(ip, GFS2_METATYPE_IN, dblock, &mp->mp_bh[x + 1]); 335 if (ret) 336 return ret; 337 } 338 mp->mp_aheight = x + 1; 339 return 0; 340 } 341 342 /** 343 * lookup_metapath - Walk the metadata tree to a specific point 344 * @ip: The inode 345 * @mp: The metapath 346 * 347 * Assumes that the inode's buffer has already been looked up and 348 * hooked onto mp->mp_bh[0] and that the metapath has been initialised 349 * by find_metapath(). 350 * 351 * If this function encounters part of the tree which has not been 352 * allocated, it returns the current height of the tree at the point 353 * at which it found the unallocated block. Blocks which are found are 354 * added to the mp->mp_bh[] list. 355 * 356 * Returns: error 357 */ 358 359 static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp) 360 { 361 return __fillup_metapath(ip, mp, 0, ip->i_height - 1); 362 } 363 364 /** 365 * fillup_metapath - fill up buffers for the metadata path to a specific height 366 * @ip: The inode 367 * @mp: The metapath 368 * @h: The height to which it should be mapped 369 * 370 * Similar to lookup_metapath, but does lookups for a range of heights 371 * 372 * Returns: error or the number of buffers filled 373 */ 374 375 static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h) 376 { 377 unsigned int x = 0; 378 int ret; 379 380 if (h) { 381 /* find the first buffer we need to look up. */ 382 for (x = h - 1; x > 0; x--) { 383 if (mp->mp_bh[x]) 384 break; 385 } 386 } 387 ret = __fillup_metapath(ip, mp, x, h); 388 if (ret) 389 return ret; 390 return mp->mp_aheight - x - 1; 391 } 392 393 static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp) 394 { 395 sector_t factor = 1, block = 0; 396 int hgt; 397 398 for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) { 399 if (hgt < mp->mp_aheight) 400 block += mp->mp_list[hgt] * factor; 401 factor *= sdp->sd_inptrs; 402 } 403 return block; 404 } 405 406 static void release_metapath(struct metapath *mp) 407 { 408 int i; 409 410 for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) { 411 if (mp->mp_bh[i] == NULL) 412 break; 413 brelse(mp->mp_bh[i]); 414 mp->mp_bh[i] = NULL; 415 } 416 } 417 418 /** 419 * gfs2_extent_length - Returns length of an extent of blocks 420 * @bh: The metadata block 421 * @ptr: Current position in @bh 422 * @limit: Max extent length to return 423 * @eob: Set to 1 if we hit "end of block" 424 * 425 * Returns: The length of the extent (minimum of one block) 426 */ 427 428 static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob) 429 { 430 const __be64 *end = (__be64 *)(bh->b_data + bh->b_size); 431 const __be64 *first = ptr; 432 u64 d = be64_to_cpu(*ptr); 433 434 *eob = 0; 435 do { 436 ptr++; 437 if (ptr >= end) 438 break; 439 d++; 440 } while(be64_to_cpu(*ptr) == d); 441 if (ptr >= end) 442 *eob = 1; 443 return ptr - first; 444 } 445 446 enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE }; 447 448 /* 449 * gfs2_metadata_walker - walk an indirect block 450 * @mp: Metapath to indirect block 451 * @ptrs: Number of pointers to look at 452 * 453 * When returning WALK_FOLLOW, the walker must update @mp to point at the right 454 * indirect block to follow. 455 */ 456 typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp, 457 unsigned int ptrs); 458 459 /* 460 * gfs2_walk_metadata - walk a tree of indirect blocks 461 * @inode: The inode 462 * @mp: Starting point of walk 463 * @max_len: Maximum number of blocks to walk 464 * @walker: Called during the walk 465 * 466 * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or 467 * past the end of metadata, and a negative error code otherwise. 468 */ 469 470 static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp, 471 u64 max_len, gfs2_metadata_walker walker) 472 { 473 struct gfs2_inode *ip = GFS2_I(inode); 474 struct gfs2_sbd *sdp = GFS2_SB(inode); 475 u64 factor = 1; 476 unsigned int hgt; 477 int ret; 478 479 /* 480 * The walk starts in the lowest allocated indirect block, which may be 481 * before the position indicated by @mp. Adjust @max_len accordingly 482 * to avoid a short walk. 483 */ 484 for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) { 485 max_len += mp->mp_list[hgt] * factor; 486 mp->mp_list[hgt] = 0; 487 factor *= sdp->sd_inptrs; 488 } 489 490 for (;;) { 491 u16 start = mp->mp_list[hgt]; 492 enum walker_status status; 493 unsigned int ptrs; 494 u64 len; 495 496 /* Walk indirect block. */ 497 ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start; 498 len = ptrs * factor; 499 if (len > max_len) 500 ptrs = DIV_ROUND_UP_ULL(max_len, factor); 501 status = walker(mp, ptrs); 502 switch (status) { 503 case WALK_STOP: 504 return 1; 505 case WALK_FOLLOW: 506 BUG_ON(mp->mp_aheight == mp->mp_fheight); 507 ptrs = mp->mp_list[hgt] - start; 508 len = ptrs * factor; 509 break; 510 case WALK_CONTINUE: 511 break; 512 } 513 if (len >= max_len) 514 break; 515 max_len -= len; 516 if (status == WALK_FOLLOW) 517 goto fill_up_metapath; 518 519 lower_metapath: 520 /* Decrease height of metapath. */ 521 brelse(mp->mp_bh[hgt]); 522 mp->mp_bh[hgt] = NULL; 523 mp->mp_list[hgt] = 0; 524 if (!hgt) 525 break; 526 hgt--; 527 factor *= sdp->sd_inptrs; 528 529 /* Advance in metadata tree. */ 530 (mp->mp_list[hgt])++; 531 if (hgt) { 532 if (mp->mp_list[hgt] >= sdp->sd_inptrs) 533 goto lower_metapath; 534 } else { 535 if (mp->mp_list[hgt] >= sdp->sd_diptrs) 536 break; 537 } 538 539 fill_up_metapath: 540 /* Increase height of metapath. */ 541 ret = fillup_metapath(ip, mp, ip->i_height - 1); 542 if (ret < 0) 543 return ret; 544 hgt += ret; 545 for (; ret; ret--) 546 do_div(factor, sdp->sd_inptrs); 547 mp->mp_aheight = hgt + 1; 548 } 549 return 0; 550 } 551 552 static enum walker_status gfs2_hole_walker(struct metapath *mp, 553 unsigned int ptrs) 554 { 555 const __be64 *start, *ptr, *end; 556 unsigned int hgt; 557 558 hgt = mp->mp_aheight - 1; 559 start = metapointer(hgt, mp); 560 end = start + ptrs; 561 562 for (ptr = start; ptr < end; ptr++) { 563 if (*ptr) { 564 mp->mp_list[hgt] += ptr - start; 565 if (mp->mp_aheight == mp->mp_fheight) 566 return WALK_STOP; 567 return WALK_FOLLOW; 568 } 569 } 570 return WALK_CONTINUE; 571 } 572 573 /** 574 * gfs2_hole_size - figure out the size of a hole 575 * @inode: The inode 576 * @lblock: The logical starting block number 577 * @len: How far to look (in blocks) 578 * @mp: The metapath at lblock 579 * @iomap: The iomap to store the hole size in 580 * 581 * This function modifies @mp. 582 * 583 * Returns: errno on error 584 */ 585 static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len, 586 struct metapath *mp, struct iomap *iomap) 587 { 588 struct metapath clone; 589 u64 hole_size; 590 int ret; 591 592 clone_metapath(&clone, mp); 593 ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker); 594 if (ret < 0) 595 goto out; 596 597 if (ret == 1) 598 hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock; 599 else 600 hole_size = len; 601 iomap->length = hole_size << inode->i_blkbits; 602 ret = 0; 603 604 out: 605 release_metapath(&clone); 606 return ret; 607 } 608 609 static inline void gfs2_indirect_init(struct metapath *mp, 610 struct gfs2_glock *gl, unsigned int i, 611 unsigned offset, u64 bn) 612 { 613 __be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data + 614 ((i > 1) ? sizeof(struct gfs2_meta_header) : 615 sizeof(struct gfs2_dinode))); 616 BUG_ON(i < 1); 617 BUG_ON(mp->mp_bh[i] != NULL); 618 mp->mp_bh[i] = gfs2_meta_new(gl, bn); 619 gfs2_trans_add_meta(gl, mp->mp_bh[i]); 620 gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN); 621 gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header)); 622 ptr += offset; 623 *ptr = cpu_to_be64(bn); 624 } 625 626 enum alloc_state { 627 ALLOC_DATA = 0, 628 ALLOC_GROW_DEPTH = 1, 629 ALLOC_GROW_HEIGHT = 2, 630 /* ALLOC_UNSTUFF = 3, TBD and rather complicated */ 631 }; 632 633 /** 634 * __gfs2_iomap_alloc - Build a metadata tree of the requested height 635 * @inode: The GFS2 inode 636 * @iomap: The iomap structure 637 * @mp: The metapath, with proper height information calculated 638 * 639 * In this routine we may have to alloc: 640 * i) Indirect blocks to grow the metadata tree height 641 * ii) Indirect blocks to fill in lower part of the metadata tree 642 * iii) Data blocks 643 * 644 * This function is called after __gfs2_iomap_get, which works out the 645 * total number of blocks which we need via gfs2_alloc_size. 646 * 647 * We then do the actual allocation asking for an extent at a time (if 648 * enough contiguous free blocks are available, there will only be one 649 * allocation request per call) and uses the state machine to initialise 650 * the blocks in order. 651 * 652 * Right now, this function will allocate at most one indirect block 653 * worth of data -- with a default block size of 4K, that's slightly 654 * less than 2M. If this limitation is ever removed to allow huge 655 * allocations, we would probably still want to limit the iomap size we 656 * return to avoid stalling other tasks during huge writes; the next 657 * iomap iteration would then find the blocks already allocated. 658 * 659 * Returns: errno on error 660 */ 661 662 static int __gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap, 663 struct metapath *mp) 664 { 665 struct gfs2_inode *ip = GFS2_I(inode); 666 struct gfs2_sbd *sdp = GFS2_SB(inode); 667 struct buffer_head *dibh = mp->mp_bh[0]; 668 u64 bn; 669 unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0; 670 size_t dblks = iomap->length >> inode->i_blkbits; 671 const unsigned end_of_metadata = mp->mp_fheight - 1; 672 int ret; 673 enum alloc_state state; 674 __be64 *ptr; 675 __be64 zero_bn = 0; 676 677 BUG_ON(mp->mp_aheight < 1); 678 BUG_ON(dibh == NULL); 679 BUG_ON(dblks < 1); 680 681 gfs2_trans_add_meta(ip->i_gl, dibh); 682 683 down_write(&ip->i_rw_mutex); 684 685 if (mp->mp_fheight == mp->mp_aheight) { 686 /* Bottom indirect block exists */ 687 state = ALLOC_DATA; 688 } else { 689 /* Need to allocate indirect blocks */ 690 if (mp->mp_fheight == ip->i_height) { 691 /* Writing into existing tree, extend tree down */ 692 iblks = mp->mp_fheight - mp->mp_aheight; 693 state = ALLOC_GROW_DEPTH; 694 } else { 695 /* Building up tree height */ 696 state = ALLOC_GROW_HEIGHT; 697 iblks = mp->mp_fheight - ip->i_height; 698 branch_start = metapath_branch_start(mp); 699 iblks += (mp->mp_fheight - branch_start); 700 } 701 } 702 703 /* start of the second part of the function (state machine) */ 704 705 blks = dblks + iblks; 706 i = mp->mp_aheight; 707 do { 708 n = blks - alloced; 709 ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL); 710 if (ret) 711 goto out; 712 alloced += n; 713 if (state != ALLOC_DATA || gfs2_is_jdata(ip)) 714 gfs2_trans_remove_revoke(sdp, bn, n); 715 switch (state) { 716 /* Growing height of tree */ 717 case ALLOC_GROW_HEIGHT: 718 if (i == 1) { 719 ptr = (__be64 *)(dibh->b_data + 720 sizeof(struct gfs2_dinode)); 721 zero_bn = *ptr; 722 } 723 for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0; 724 i++, n--) 725 gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++); 726 if (i - 1 == mp->mp_fheight - ip->i_height) { 727 i--; 728 gfs2_buffer_copy_tail(mp->mp_bh[i], 729 sizeof(struct gfs2_meta_header), 730 dibh, sizeof(struct gfs2_dinode)); 731 gfs2_buffer_clear_tail(dibh, 732 sizeof(struct gfs2_dinode) + 733 sizeof(__be64)); 734 ptr = (__be64 *)(mp->mp_bh[i]->b_data + 735 sizeof(struct gfs2_meta_header)); 736 *ptr = zero_bn; 737 state = ALLOC_GROW_DEPTH; 738 for(i = branch_start; i < mp->mp_fheight; i++) { 739 if (mp->mp_bh[i] == NULL) 740 break; 741 brelse(mp->mp_bh[i]); 742 mp->mp_bh[i] = NULL; 743 } 744 i = branch_start; 745 } 746 if (n == 0) 747 break; 748 fallthrough; /* To branching from existing tree */ 749 case ALLOC_GROW_DEPTH: 750 if (i > 1 && i < mp->mp_fheight) 751 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]); 752 for (; i < mp->mp_fheight && n > 0; i++, n--) 753 gfs2_indirect_init(mp, ip->i_gl, i, 754 mp->mp_list[i-1], bn++); 755 if (i == mp->mp_fheight) 756 state = ALLOC_DATA; 757 if (n == 0) 758 break; 759 fallthrough; /* To tree complete, adding data blocks */ 760 case ALLOC_DATA: 761 BUG_ON(n > dblks); 762 BUG_ON(mp->mp_bh[end_of_metadata] == NULL); 763 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]); 764 dblks = n; 765 ptr = metapointer(end_of_metadata, mp); 766 iomap->addr = bn << inode->i_blkbits; 767 iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW; 768 while (n-- > 0) 769 *ptr++ = cpu_to_be64(bn++); 770 break; 771 } 772 } while (iomap->addr == IOMAP_NULL_ADDR); 773 774 iomap->type = IOMAP_MAPPED; 775 iomap->length = (u64)dblks << inode->i_blkbits; 776 ip->i_height = mp->mp_fheight; 777 gfs2_add_inode_blocks(&ip->i_inode, alloced); 778 gfs2_dinode_out(ip, dibh->b_data); 779 out: 780 up_write(&ip->i_rw_mutex); 781 return ret; 782 } 783 784 #define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE 785 786 /** 787 * gfs2_alloc_size - Compute the maximum allocation size 788 * @inode: The inode 789 * @mp: The metapath 790 * @size: Requested size in blocks 791 * 792 * Compute the maximum size of the next allocation at @mp. 793 * 794 * Returns: size in blocks 795 */ 796 static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size) 797 { 798 struct gfs2_inode *ip = GFS2_I(inode); 799 struct gfs2_sbd *sdp = GFS2_SB(inode); 800 const __be64 *first, *ptr, *end; 801 802 /* 803 * For writes to stuffed files, this function is called twice via 804 * __gfs2_iomap_get, before and after unstuffing. The size we return the 805 * first time needs to be large enough to get the reservation and 806 * allocation sizes right. The size we return the second time must 807 * be exact or else __gfs2_iomap_alloc won't do the right thing. 808 */ 809 810 if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) { 811 unsigned int maxsize = mp->mp_fheight > 1 ? 812 sdp->sd_inptrs : sdp->sd_diptrs; 813 maxsize -= mp->mp_list[mp->mp_fheight - 1]; 814 if (size > maxsize) 815 size = maxsize; 816 return size; 817 } 818 819 first = metapointer(ip->i_height - 1, mp); 820 end = metaend(ip->i_height - 1, mp); 821 if (end - first > size) 822 end = first + size; 823 for (ptr = first; ptr < end; ptr++) { 824 if (*ptr) 825 break; 826 } 827 return ptr - first; 828 } 829 830 /** 831 * __gfs2_iomap_get - Map blocks from an inode to disk blocks 832 * @inode: The inode 833 * @pos: Starting position in bytes 834 * @length: Length to map, in bytes 835 * @flags: iomap flags 836 * @iomap: The iomap structure 837 * @mp: The metapath 838 * 839 * Returns: errno 840 */ 841 static int __gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length, 842 unsigned flags, struct iomap *iomap, 843 struct metapath *mp) 844 { 845 struct gfs2_inode *ip = GFS2_I(inode); 846 struct gfs2_sbd *sdp = GFS2_SB(inode); 847 loff_t size = i_size_read(inode); 848 __be64 *ptr; 849 sector_t lblock; 850 sector_t lblock_stop; 851 int ret; 852 int eob; 853 u64 len; 854 struct buffer_head *dibh = NULL, *bh; 855 u8 height; 856 857 if (!length) 858 return -EINVAL; 859 860 down_read(&ip->i_rw_mutex); 861 862 ret = gfs2_meta_inode_buffer(ip, &dibh); 863 if (ret) 864 goto unlock; 865 mp->mp_bh[0] = dibh; 866 867 if (gfs2_is_stuffed(ip)) { 868 if (flags & IOMAP_WRITE) { 869 loff_t max_size = gfs2_max_stuffed_size(ip); 870 871 if (pos + length > max_size) 872 goto unstuff; 873 iomap->length = max_size; 874 } else { 875 if (pos >= size) { 876 if (flags & IOMAP_REPORT) { 877 ret = -ENOENT; 878 goto unlock; 879 } else { 880 iomap->offset = pos; 881 iomap->length = length; 882 goto hole_found; 883 } 884 } 885 iomap->length = size; 886 } 887 iomap->addr = (ip->i_no_addr << inode->i_blkbits) + 888 sizeof(struct gfs2_dinode); 889 iomap->type = IOMAP_INLINE; 890 iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode); 891 goto out; 892 } 893 894 unstuff: 895 lblock = pos >> inode->i_blkbits; 896 iomap->offset = lblock << inode->i_blkbits; 897 lblock_stop = (pos + length - 1) >> inode->i_blkbits; 898 len = lblock_stop - lblock + 1; 899 iomap->length = len << inode->i_blkbits; 900 901 height = ip->i_height; 902 while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height]) 903 height++; 904 find_metapath(sdp, lblock, mp, height); 905 if (height > ip->i_height || gfs2_is_stuffed(ip)) 906 goto do_alloc; 907 908 ret = lookup_metapath(ip, mp); 909 if (ret) 910 goto unlock; 911 912 if (mp->mp_aheight != ip->i_height) 913 goto do_alloc; 914 915 ptr = metapointer(ip->i_height - 1, mp); 916 if (*ptr == 0) 917 goto do_alloc; 918 919 bh = mp->mp_bh[ip->i_height - 1]; 920 len = gfs2_extent_length(bh, ptr, len, &eob); 921 922 iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits; 923 iomap->length = len << inode->i_blkbits; 924 iomap->type = IOMAP_MAPPED; 925 iomap->flags |= IOMAP_F_MERGED; 926 if (eob) 927 iomap->flags |= IOMAP_F_GFS2_BOUNDARY; 928 929 out: 930 iomap->bdev = inode->i_sb->s_bdev; 931 unlock: 932 up_read(&ip->i_rw_mutex); 933 return ret; 934 935 do_alloc: 936 if (flags & IOMAP_REPORT) { 937 if (pos >= size) 938 ret = -ENOENT; 939 else if (height == ip->i_height) 940 ret = gfs2_hole_size(inode, lblock, len, mp, iomap); 941 else 942 iomap->length = size - iomap->offset; 943 } else if (flags & IOMAP_WRITE) { 944 u64 alloc_size; 945 946 if (flags & IOMAP_DIRECT) 947 goto out; /* (see gfs2_file_direct_write) */ 948 949 len = gfs2_alloc_size(inode, mp, len); 950 alloc_size = len << inode->i_blkbits; 951 if (alloc_size < iomap->length) 952 iomap->length = alloc_size; 953 } else { 954 if (pos < size && height == ip->i_height) 955 ret = gfs2_hole_size(inode, lblock, len, mp, iomap); 956 } 957 hole_found: 958 iomap->addr = IOMAP_NULL_ADDR; 959 iomap->type = IOMAP_HOLE; 960 goto out; 961 } 962 963 static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos, 964 unsigned len) 965 { 966 unsigned int blockmask = i_blocksize(inode) - 1; 967 struct gfs2_sbd *sdp = GFS2_SB(inode); 968 unsigned int blocks; 969 970 blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits; 971 return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0); 972 } 973 974 static void gfs2_iomap_page_done(struct inode *inode, loff_t pos, 975 unsigned copied, struct page *page) 976 { 977 struct gfs2_trans *tr = current->journal_info; 978 struct gfs2_inode *ip = GFS2_I(inode); 979 struct gfs2_sbd *sdp = GFS2_SB(inode); 980 981 if (page && !gfs2_is_stuffed(ip)) 982 gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied); 983 984 if (tr->tr_num_buf_new) 985 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 986 987 gfs2_trans_end(sdp); 988 } 989 990 static const struct iomap_page_ops gfs2_iomap_page_ops = { 991 .page_prepare = gfs2_iomap_page_prepare, 992 .page_done = gfs2_iomap_page_done, 993 }; 994 995 static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos, 996 loff_t length, unsigned flags, 997 struct iomap *iomap, 998 struct metapath *mp) 999 { 1000 struct gfs2_inode *ip = GFS2_I(inode); 1001 struct gfs2_sbd *sdp = GFS2_SB(inode); 1002 bool unstuff; 1003 int ret; 1004 1005 unstuff = gfs2_is_stuffed(ip) && 1006 pos + length > gfs2_max_stuffed_size(ip); 1007 1008 if (unstuff || iomap->type == IOMAP_HOLE) { 1009 unsigned int data_blocks, ind_blocks; 1010 struct gfs2_alloc_parms ap = {}; 1011 unsigned int rblocks; 1012 struct gfs2_trans *tr; 1013 1014 gfs2_write_calc_reserv(ip, iomap->length, &data_blocks, 1015 &ind_blocks); 1016 ap.target = data_blocks + ind_blocks; 1017 ret = gfs2_quota_lock_check(ip, &ap); 1018 if (ret) 1019 return ret; 1020 1021 ret = gfs2_inplace_reserve(ip, &ap); 1022 if (ret) 1023 goto out_qunlock; 1024 1025 rblocks = RES_DINODE + ind_blocks; 1026 if (gfs2_is_jdata(ip)) 1027 rblocks += data_blocks; 1028 if (ind_blocks || data_blocks) 1029 rblocks += RES_STATFS + RES_QUOTA; 1030 if (inode == sdp->sd_rindex) 1031 rblocks += 2 * RES_STATFS; 1032 rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks); 1033 1034 ret = gfs2_trans_begin(sdp, rblocks, 1035 iomap->length >> inode->i_blkbits); 1036 if (ret) 1037 goto out_trans_fail; 1038 1039 if (unstuff) { 1040 ret = gfs2_unstuff_dinode(ip); 1041 if (ret) 1042 goto out_trans_end; 1043 release_metapath(mp); 1044 ret = __gfs2_iomap_get(inode, iomap->offset, 1045 iomap->length, flags, iomap, mp); 1046 if (ret) 1047 goto out_trans_end; 1048 } 1049 1050 if (iomap->type == IOMAP_HOLE) { 1051 ret = __gfs2_iomap_alloc(inode, iomap, mp); 1052 if (ret) { 1053 gfs2_trans_end(sdp); 1054 gfs2_inplace_release(ip); 1055 punch_hole(ip, iomap->offset, iomap->length); 1056 goto out_qunlock; 1057 } 1058 } 1059 1060 tr = current->journal_info; 1061 if (tr->tr_num_buf_new) 1062 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 1063 1064 gfs2_trans_end(sdp); 1065 } 1066 1067 if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip)) 1068 iomap->page_ops = &gfs2_iomap_page_ops; 1069 return 0; 1070 1071 out_trans_end: 1072 gfs2_trans_end(sdp); 1073 out_trans_fail: 1074 gfs2_inplace_release(ip); 1075 out_qunlock: 1076 gfs2_quota_unlock(ip); 1077 return ret; 1078 } 1079 1080 static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length, 1081 unsigned flags, struct iomap *iomap, 1082 struct iomap *srcmap) 1083 { 1084 struct gfs2_inode *ip = GFS2_I(inode); 1085 struct metapath mp = { .mp_aheight = 1, }; 1086 int ret; 1087 1088 if (gfs2_is_jdata(ip)) 1089 iomap->flags |= IOMAP_F_BUFFER_HEAD; 1090 1091 trace_gfs2_iomap_start(ip, pos, length, flags); 1092 ret = __gfs2_iomap_get(inode, pos, length, flags, iomap, &mp); 1093 if (ret) 1094 goto out_unlock; 1095 1096 switch(flags & (IOMAP_WRITE | IOMAP_ZERO)) { 1097 case IOMAP_WRITE: 1098 if (flags & IOMAP_DIRECT) { 1099 /* 1100 * Silently fall back to buffered I/O for stuffed files 1101 * or if we've got a hole (see gfs2_file_direct_write). 1102 */ 1103 if (iomap->type != IOMAP_MAPPED) 1104 ret = -ENOTBLK; 1105 goto out_unlock; 1106 } 1107 break; 1108 case IOMAP_ZERO: 1109 if (iomap->type == IOMAP_HOLE) 1110 goto out_unlock; 1111 break; 1112 default: 1113 goto out_unlock; 1114 } 1115 1116 ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp); 1117 1118 out_unlock: 1119 release_metapath(&mp); 1120 trace_gfs2_iomap_end(ip, iomap, ret); 1121 return ret; 1122 } 1123 1124 static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length, 1125 ssize_t written, unsigned flags, struct iomap *iomap) 1126 { 1127 struct gfs2_inode *ip = GFS2_I(inode); 1128 struct gfs2_sbd *sdp = GFS2_SB(inode); 1129 1130 switch (flags & (IOMAP_WRITE | IOMAP_ZERO)) { 1131 case IOMAP_WRITE: 1132 if (flags & IOMAP_DIRECT) 1133 return 0; 1134 break; 1135 case IOMAP_ZERO: 1136 if (iomap->type == IOMAP_HOLE) 1137 return 0; 1138 break; 1139 default: 1140 return 0; 1141 } 1142 1143 if (!gfs2_is_stuffed(ip)) 1144 gfs2_ordered_add_inode(ip); 1145 1146 if (inode == sdp->sd_rindex) 1147 adjust_fs_space(inode); 1148 1149 gfs2_inplace_release(ip); 1150 1151 if (ip->i_qadata && ip->i_qadata->qa_qd_num) 1152 gfs2_quota_unlock(ip); 1153 1154 if (length != written && (iomap->flags & IOMAP_F_NEW)) { 1155 /* Deallocate blocks that were just allocated. */ 1156 loff_t blockmask = i_blocksize(inode) - 1; 1157 loff_t end = (pos + length) & ~blockmask; 1158 1159 pos = (pos + written + blockmask) & ~blockmask; 1160 if (pos < end) { 1161 truncate_pagecache_range(inode, pos, end - 1); 1162 punch_hole(ip, pos, end - pos); 1163 } 1164 } 1165 1166 if (unlikely(!written)) 1167 return 0; 1168 1169 if (iomap->flags & IOMAP_F_SIZE_CHANGED) 1170 mark_inode_dirty(inode); 1171 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); 1172 return 0; 1173 } 1174 1175 const struct iomap_ops gfs2_iomap_ops = { 1176 .iomap_begin = gfs2_iomap_begin, 1177 .iomap_end = gfs2_iomap_end, 1178 }; 1179 1180 /** 1181 * gfs2_block_map - Map one or more blocks of an inode to a disk block 1182 * @inode: The inode 1183 * @lblock: The logical block number 1184 * @bh_map: The bh to be mapped 1185 * @create: True if its ok to alloc blocks to satify the request 1186 * 1187 * The size of the requested mapping is defined in bh_map->b_size. 1188 * 1189 * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged 1190 * when @lblock is not mapped. Sets buffer_mapped(bh_map) and 1191 * bh_map->b_size to indicate the size of the mapping when @lblock and 1192 * successive blocks are mapped, up to the requested size. 1193 * 1194 * Sets buffer_boundary() if a read of metadata will be required 1195 * before the next block can be mapped. Sets buffer_new() if new 1196 * blocks were allocated. 1197 * 1198 * Returns: errno 1199 */ 1200 1201 int gfs2_block_map(struct inode *inode, sector_t lblock, 1202 struct buffer_head *bh_map, int create) 1203 { 1204 struct gfs2_inode *ip = GFS2_I(inode); 1205 loff_t pos = (loff_t)lblock << inode->i_blkbits; 1206 loff_t length = bh_map->b_size; 1207 struct iomap iomap = { }; 1208 int ret; 1209 1210 clear_buffer_mapped(bh_map); 1211 clear_buffer_new(bh_map); 1212 clear_buffer_boundary(bh_map); 1213 trace_gfs2_bmap(ip, bh_map, lblock, create, 1); 1214 1215 if (!create) 1216 ret = gfs2_iomap_get(inode, pos, length, &iomap); 1217 else 1218 ret = gfs2_iomap_alloc(inode, pos, length, &iomap); 1219 if (ret) 1220 goto out; 1221 1222 if (iomap.length > bh_map->b_size) { 1223 iomap.length = bh_map->b_size; 1224 iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY; 1225 } 1226 if (iomap.addr != IOMAP_NULL_ADDR) 1227 map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits); 1228 bh_map->b_size = iomap.length; 1229 if (iomap.flags & IOMAP_F_GFS2_BOUNDARY) 1230 set_buffer_boundary(bh_map); 1231 if (iomap.flags & IOMAP_F_NEW) 1232 set_buffer_new(bh_map); 1233 1234 out: 1235 trace_gfs2_bmap(ip, bh_map, lblock, create, ret); 1236 return ret; 1237 } 1238 1239 int gfs2_get_extent(struct inode *inode, u64 lblock, u64 *dblock, 1240 unsigned int *extlen) 1241 { 1242 unsigned int blkbits = inode->i_blkbits; 1243 struct iomap iomap = { }; 1244 unsigned int len; 1245 int ret; 1246 1247 ret = gfs2_iomap_get(inode, lblock << blkbits, *extlen << blkbits, 1248 &iomap); 1249 if (ret) 1250 return ret; 1251 if (iomap.type != IOMAP_MAPPED) 1252 return -EIO; 1253 *dblock = iomap.addr >> blkbits; 1254 len = iomap.length >> blkbits; 1255 if (len < *extlen) 1256 *extlen = len; 1257 return 0; 1258 } 1259 1260 int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock, 1261 unsigned int *extlen, bool *new) 1262 { 1263 unsigned int blkbits = inode->i_blkbits; 1264 struct iomap iomap = { }; 1265 unsigned int len; 1266 int ret; 1267 1268 ret = gfs2_iomap_alloc(inode, lblock << blkbits, *extlen << blkbits, 1269 &iomap); 1270 if (ret) 1271 return ret; 1272 if (iomap.type != IOMAP_MAPPED) 1273 return -EIO; 1274 *dblock = iomap.addr >> blkbits; 1275 len = iomap.length >> blkbits; 1276 if (len < *extlen) 1277 *extlen = len; 1278 *new = iomap.flags & IOMAP_F_NEW; 1279 return 0; 1280 } 1281 1282 /* 1283 * NOTE: Never call gfs2_block_zero_range with an open transaction because it 1284 * uses iomap write to perform its actions, which begin their own transactions 1285 * (iomap_begin, page_prepare, etc.) 1286 */ 1287 static int gfs2_block_zero_range(struct inode *inode, loff_t from, 1288 unsigned int length) 1289 { 1290 BUG_ON(current->journal_info); 1291 return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops); 1292 } 1293 1294 #define GFS2_JTRUNC_REVOKES 8192 1295 1296 /** 1297 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files 1298 * @inode: The inode being truncated 1299 * @oldsize: The original (larger) size 1300 * @newsize: The new smaller size 1301 * 1302 * With jdata files, we have to journal a revoke for each block which is 1303 * truncated. As a result, we need to split this into separate transactions 1304 * if the number of pages being truncated gets too large. 1305 */ 1306 1307 static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize) 1308 { 1309 struct gfs2_sbd *sdp = GFS2_SB(inode); 1310 u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize; 1311 u64 chunk; 1312 int error; 1313 1314 while (oldsize != newsize) { 1315 struct gfs2_trans *tr; 1316 unsigned int offs; 1317 1318 chunk = oldsize - newsize; 1319 if (chunk > max_chunk) 1320 chunk = max_chunk; 1321 1322 offs = oldsize & ~PAGE_MASK; 1323 if (offs && chunk > PAGE_SIZE) 1324 chunk = offs + ((chunk - offs) & PAGE_MASK); 1325 1326 truncate_pagecache(inode, oldsize - chunk); 1327 oldsize -= chunk; 1328 1329 tr = current->journal_info; 1330 if (!test_bit(TR_TOUCHED, &tr->tr_flags)) 1331 continue; 1332 1333 gfs2_trans_end(sdp); 1334 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES); 1335 if (error) 1336 return error; 1337 } 1338 1339 return 0; 1340 } 1341 1342 static int trunc_start(struct inode *inode, u64 newsize) 1343 { 1344 struct gfs2_inode *ip = GFS2_I(inode); 1345 struct gfs2_sbd *sdp = GFS2_SB(inode); 1346 struct buffer_head *dibh = NULL; 1347 int journaled = gfs2_is_jdata(ip); 1348 u64 oldsize = inode->i_size; 1349 int error; 1350 1351 if (!gfs2_is_stuffed(ip)) { 1352 unsigned int blocksize = i_blocksize(inode); 1353 unsigned int offs = newsize & (blocksize - 1); 1354 if (offs) { 1355 error = gfs2_block_zero_range(inode, newsize, 1356 blocksize - offs); 1357 if (error) 1358 return error; 1359 } 1360 } 1361 if (journaled) 1362 error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES); 1363 else 1364 error = gfs2_trans_begin(sdp, RES_DINODE, 0); 1365 if (error) 1366 return error; 1367 1368 error = gfs2_meta_inode_buffer(ip, &dibh); 1369 if (error) 1370 goto out; 1371 1372 gfs2_trans_add_meta(ip->i_gl, dibh); 1373 1374 if (gfs2_is_stuffed(ip)) 1375 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize); 1376 else 1377 ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG; 1378 1379 i_size_write(inode, newsize); 1380 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode); 1381 gfs2_dinode_out(ip, dibh->b_data); 1382 1383 if (journaled) 1384 error = gfs2_journaled_truncate(inode, oldsize, newsize); 1385 else 1386 truncate_pagecache(inode, newsize); 1387 1388 out: 1389 brelse(dibh); 1390 if (current->journal_info) 1391 gfs2_trans_end(sdp); 1392 return error; 1393 } 1394 1395 int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length, 1396 struct iomap *iomap) 1397 { 1398 struct metapath mp = { .mp_aheight = 1, }; 1399 int ret; 1400 1401 ret = __gfs2_iomap_get(inode, pos, length, 0, iomap, &mp); 1402 release_metapath(&mp); 1403 return ret; 1404 } 1405 1406 int gfs2_iomap_alloc(struct inode *inode, loff_t pos, loff_t length, 1407 struct iomap *iomap) 1408 { 1409 struct metapath mp = { .mp_aheight = 1, }; 1410 int ret; 1411 1412 ret = __gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp); 1413 if (!ret && iomap->type == IOMAP_HOLE) 1414 ret = __gfs2_iomap_alloc(inode, iomap, &mp); 1415 release_metapath(&mp); 1416 return ret; 1417 } 1418 1419 /** 1420 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein 1421 * @ip: inode 1422 * @rd_gh: holder of resource group glock 1423 * @bh: buffer head to sweep 1424 * @start: starting point in bh 1425 * @end: end point in bh 1426 * @meta: true if bh points to metadata (rather than data) 1427 * @btotal: place to keep count of total blocks freed 1428 * 1429 * We sweep a metadata buffer (provided by the metapath) for blocks we need to 1430 * free, and free them all. However, we do it one rgrp at a time. If this 1431 * block has references to multiple rgrps, we break it into individual 1432 * transactions. This allows other processes to use the rgrps while we're 1433 * focused on a single one, for better concurrency / performance. 1434 * At every transaction boundary, we rewrite the inode into the journal. 1435 * That way the bitmaps are kept consistent with the inode and we can recover 1436 * if we're interrupted by power-outages. 1437 * 1438 * Returns: 0, or return code if an error occurred. 1439 * *btotal has the total number of blocks freed 1440 */ 1441 static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh, 1442 struct buffer_head *bh, __be64 *start, __be64 *end, 1443 bool meta, u32 *btotal) 1444 { 1445 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1446 struct gfs2_rgrpd *rgd; 1447 struct gfs2_trans *tr; 1448 __be64 *p; 1449 int blks_outside_rgrp; 1450 u64 bn, bstart, isize_blks; 1451 s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */ 1452 int ret = 0; 1453 bool buf_in_tr = false; /* buffer was added to transaction */ 1454 1455 more_rgrps: 1456 rgd = NULL; 1457 if (gfs2_holder_initialized(rd_gh)) { 1458 rgd = gfs2_glock2rgrp(rd_gh->gh_gl); 1459 gfs2_assert_withdraw(sdp, 1460 gfs2_glock_is_locked_by_me(rd_gh->gh_gl)); 1461 } 1462 blks_outside_rgrp = 0; 1463 bstart = 0; 1464 blen = 0; 1465 1466 for (p = start; p < end; p++) { 1467 if (!*p) 1468 continue; 1469 bn = be64_to_cpu(*p); 1470 1471 if (rgd) { 1472 if (!rgrp_contains_block(rgd, bn)) { 1473 blks_outside_rgrp++; 1474 continue; 1475 } 1476 } else { 1477 rgd = gfs2_blk2rgrpd(sdp, bn, true); 1478 if (unlikely(!rgd)) { 1479 ret = -EIO; 1480 goto out; 1481 } 1482 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 1483 LM_FLAG_NODE_SCOPE, rd_gh); 1484 if (ret) 1485 goto out; 1486 1487 /* Must be done with the rgrp glock held: */ 1488 if (gfs2_rs_active(&ip->i_res) && 1489 rgd == ip->i_res.rs_rgd) 1490 gfs2_rs_deltree(&ip->i_res); 1491 } 1492 1493 /* The size of our transactions will be unknown until we 1494 actually process all the metadata blocks that relate to 1495 the rgrp. So we estimate. We know it can't be more than 1496 the dinode's i_blocks and we don't want to exceed the 1497 journal flush threshold, sd_log_thresh2. */ 1498 if (current->journal_info == NULL) { 1499 unsigned int jblocks_rqsted, revokes; 1500 1501 jblocks_rqsted = rgd->rd_length + RES_DINODE + 1502 RES_INDIRECT; 1503 isize_blks = gfs2_get_inode_blocks(&ip->i_inode); 1504 if (isize_blks > atomic_read(&sdp->sd_log_thresh2)) 1505 jblocks_rqsted += 1506 atomic_read(&sdp->sd_log_thresh2); 1507 else 1508 jblocks_rqsted += isize_blks; 1509 revokes = jblocks_rqsted; 1510 if (meta) 1511 revokes += end - start; 1512 else if (ip->i_depth) 1513 revokes += sdp->sd_inptrs; 1514 ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes); 1515 if (ret) 1516 goto out_unlock; 1517 down_write(&ip->i_rw_mutex); 1518 } 1519 /* check if we will exceed the transaction blocks requested */ 1520 tr = current->journal_info; 1521 if (tr->tr_num_buf_new + RES_STATFS + 1522 RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) { 1523 /* We set blks_outside_rgrp to ensure the loop will 1524 be repeated for the same rgrp, but with a new 1525 transaction. */ 1526 blks_outside_rgrp++; 1527 /* This next part is tricky. If the buffer was added 1528 to the transaction, we've already set some block 1529 pointers to 0, so we better follow through and free 1530 them, or we will introduce corruption (so break). 1531 This may be impossible, or at least rare, but I 1532 decided to cover the case regardless. 1533 1534 If the buffer was not added to the transaction 1535 (this call), doing so would exceed our transaction 1536 size, so we need to end the transaction and start a 1537 new one (so goto). */ 1538 1539 if (buf_in_tr) 1540 break; 1541 goto out_unlock; 1542 } 1543 1544 gfs2_trans_add_meta(ip->i_gl, bh); 1545 buf_in_tr = true; 1546 *p = 0; 1547 if (bstart + blen == bn) { 1548 blen++; 1549 continue; 1550 } 1551 if (bstart) { 1552 __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta); 1553 (*btotal) += blen; 1554 gfs2_add_inode_blocks(&ip->i_inode, -blen); 1555 } 1556 bstart = bn; 1557 blen = 1; 1558 } 1559 if (bstart) { 1560 __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta); 1561 (*btotal) += blen; 1562 gfs2_add_inode_blocks(&ip->i_inode, -blen); 1563 } 1564 out_unlock: 1565 if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks 1566 outside the rgrp we just processed, 1567 do it all over again. */ 1568 if (current->journal_info) { 1569 struct buffer_head *dibh; 1570 1571 ret = gfs2_meta_inode_buffer(ip, &dibh); 1572 if (ret) 1573 goto out; 1574 1575 /* Every transaction boundary, we rewrite the dinode 1576 to keep its di_blocks current in case of failure. */ 1577 ip->i_inode.i_mtime = ip->i_inode.i_ctime = 1578 current_time(&ip->i_inode); 1579 gfs2_trans_add_meta(ip->i_gl, dibh); 1580 gfs2_dinode_out(ip, dibh->b_data); 1581 brelse(dibh); 1582 up_write(&ip->i_rw_mutex); 1583 gfs2_trans_end(sdp); 1584 buf_in_tr = false; 1585 } 1586 gfs2_glock_dq_uninit(rd_gh); 1587 cond_resched(); 1588 goto more_rgrps; 1589 } 1590 out: 1591 return ret; 1592 } 1593 1594 static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h) 1595 { 1596 if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0]))) 1597 return false; 1598 return true; 1599 } 1600 1601 /** 1602 * find_nonnull_ptr - find a non-null pointer given a metapath and height 1603 * @sdp: The superblock 1604 * @mp: starting metapath 1605 * @h: desired height to search 1606 * @end_list: See punch_hole(). 1607 * @end_aligned: See punch_hole(). 1608 * 1609 * Assumes the metapath is valid (with buffers) out to height h. 1610 * Returns: true if a non-null pointer was found in the metapath buffer 1611 * false if all remaining pointers are NULL in the buffer 1612 */ 1613 static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp, 1614 unsigned int h, 1615 __u16 *end_list, unsigned int end_aligned) 1616 { 1617 struct buffer_head *bh = mp->mp_bh[h]; 1618 __be64 *first, *ptr, *end; 1619 1620 first = metaptr1(h, mp); 1621 ptr = first + mp->mp_list[h]; 1622 end = (__be64 *)(bh->b_data + bh->b_size); 1623 if (end_list && mp_eq_to_hgt(mp, end_list, h)) { 1624 bool keep_end = h < end_aligned; 1625 end = first + end_list[h] + keep_end; 1626 } 1627 1628 while (ptr < end) { 1629 if (*ptr) { /* if we have a non-null pointer */ 1630 mp->mp_list[h] = ptr - first; 1631 h++; 1632 if (h < GFS2_MAX_META_HEIGHT) 1633 mp->mp_list[h] = 0; 1634 return true; 1635 } 1636 ptr++; 1637 } 1638 return false; 1639 } 1640 1641 enum dealloc_states { 1642 DEALLOC_MP_FULL = 0, /* Strip a metapath with all buffers read in */ 1643 DEALLOC_MP_LOWER = 1, /* lower the metapath strip height */ 1644 DEALLOC_FILL_MP = 2, /* Fill in the metapath to the given height. */ 1645 DEALLOC_DONE = 3, /* process complete */ 1646 }; 1647 1648 static inline void 1649 metapointer_range(struct metapath *mp, int height, 1650 __u16 *start_list, unsigned int start_aligned, 1651 __u16 *end_list, unsigned int end_aligned, 1652 __be64 **start, __be64 **end) 1653 { 1654 struct buffer_head *bh = mp->mp_bh[height]; 1655 __be64 *first; 1656 1657 first = metaptr1(height, mp); 1658 *start = first; 1659 if (mp_eq_to_hgt(mp, start_list, height)) { 1660 bool keep_start = height < start_aligned; 1661 *start = first + start_list[height] + keep_start; 1662 } 1663 *end = (__be64 *)(bh->b_data + bh->b_size); 1664 if (end_list && mp_eq_to_hgt(mp, end_list, height)) { 1665 bool keep_end = height < end_aligned; 1666 *end = first + end_list[height] + keep_end; 1667 } 1668 } 1669 1670 static inline bool walk_done(struct gfs2_sbd *sdp, 1671 struct metapath *mp, int height, 1672 __u16 *end_list, unsigned int end_aligned) 1673 { 1674 __u16 end; 1675 1676 if (end_list) { 1677 bool keep_end = height < end_aligned; 1678 if (!mp_eq_to_hgt(mp, end_list, height)) 1679 return false; 1680 end = end_list[height] + keep_end; 1681 } else 1682 end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs; 1683 return mp->mp_list[height] >= end; 1684 } 1685 1686 /** 1687 * punch_hole - deallocate blocks in a file 1688 * @ip: inode to truncate 1689 * @offset: the start of the hole 1690 * @length: the size of the hole (or 0 for truncate) 1691 * 1692 * Punch a hole into a file or truncate a file at a given position. This 1693 * function operates in whole blocks (@offset and @length are rounded 1694 * accordingly); partially filled blocks must be cleared otherwise. 1695 * 1696 * This function works from the bottom up, and from the right to the left. In 1697 * other words, it strips off the highest layer (data) before stripping any of 1698 * the metadata. Doing it this way is best in case the operation is interrupted 1699 * by power failure, etc. The dinode is rewritten in every transaction to 1700 * guarantee integrity. 1701 */ 1702 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length) 1703 { 1704 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1705 u64 maxsize = sdp->sd_heightsize[ip->i_height]; 1706 struct metapath mp = {}; 1707 struct buffer_head *dibh, *bh; 1708 struct gfs2_holder rd_gh; 1709 unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift; 1710 u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift; 1711 __u16 start_list[GFS2_MAX_META_HEIGHT]; 1712 __u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL; 1713 unsigned int start_aligned, end_aligned; 1714 unsigned int strip_h = ip->i_height - 1; 1715 u32 btotal = 0; 1716 int ret, state; 1717 int mp_h; /* metapath buffers are read in to this height */ 1718 u64 prev_bnr = 0; 1719 __be64 *start, *end; 1720 1721 if (offset >= maxsize) { 1722 /* 1723 * The starting point lies beyond the allocated meta-data; 1724 * there are no blocks do deallocate. 1725 */ 1726 return 0; 1727 } 1728 1729 /* 1730 * The start position of the hole is defined by lblock, start_list, and 1731 * start_aligned. The end position of the hole is defined by lend, 1732 * end_list, and end_aligned. 1733 * 1734 * start_aligned and end_aligned define down to which height the start 1735 * and end positions are aligned to the metadata tree (i.e., the 1736 * position is a multiple of the metadata granularity at the height 1737 * above). This determines at which heights additional meta pointers 1738 * needs to be preserved for the remaining data. 1739 */ 1740 1741 if (length) { 1742 u64 end_offset = offset + length; 1743 u64 lend; 1744 1745 /* 1746 * Clip the end at the maximum file size for the given height: 1747 * that's how far the metadata goes; files bigger than that 1748 * will have additional layers of indirection. 1749 */ 1750 if (end_offset > maxsize) 1751 end_offset = maxsize; 1752 lend = end_offset >> bsize_shift; 1753 1754 if (lblock >= lend) 1755 return 0; 1756 1757 find_metapath(sdp, lend, &mp, ip->i_height); 1758 end_list = __end_list; 1759 memcpy(end_list, mp.mp_list, sizeof(mp.mp_list)); 1760 1761 for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) { 1762 if (end_list[mp_h]) 1763 break; 1764 } 1765 end_aligned = mp_h; 1766 } 1767 1768 find_metapath(sdp, lblock, &mp, ip->i_height); 1769 memcpy(start_list, mp.mp_list, sizeof(start_list)); 1770 1771 for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) { 1772 if (start_list[mp_h]) 1773 break; 1774 } 1775 start_aligned = mp_h; 1776 1777 ret = gfs2_meta_inode_buffer(ip, &dibh); 1778 if (ret) 1779 return ret; 1780 1781 mp.mp_bh[0] = dibh; 1782 ret = lookup_metapath(ip, &mp); 1783 if (ret) 1784 goto out_metapath; 1785 1786 /* issue read-ahead on metadata */ 1787 for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) { 1788 metapointer_range(&mp, mp_h, start_list, start_aligned, 1789 end_list, end_aligned, &start, &end); 1790 gfs2_metapath_ra(ip->i_gl, start, end); 1791 } 1792 1793 if (mp.mp_aheight == ip->i_height) 1794 state = DEALLOC_MP_FULL; /* We have a complete metapath */ 1795 else 1796 state = DEALLOC_FILL_MP; /* deal with partial metapath */ 1797 1798 ret = gfs2_rindex_update(sdp); 1799 if (ret) 1800 goto out_metapath; 1801 1802 ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); 1803 if (ret) 1804 goto out_metapath; 1805 gfs2_holder_mark_uninitialized(&rd_gh); 1806 1807 mp_h = strip_h; 1808 1809 while (state != DEALLOC_DONE) { 1810 switch (state) { 1811 /* Truncate a full metapath at the given strip height. 1812 * Note that strip_h == mp_h in order to be in this state. */ 1813 case DEALLOC_MP_FULL: 1814 bh = mp.mp_bh[mp_h]; 1815 gfs2_assert_withdraw(sdp, bh); 1816 if (gfs2_assert_withdraw(sdp, 1817 prev_bnr != bh->b_blocknr)) { 1818 fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u," 1819 "s_h:%u, mp_h:%u\n", 1820 (unsigned long long)ip->i_no_addr, 1821 prev_bnr, ip->i_height, strip_h, mp_h); 1822 } 1823 prev_bnr = bh->b_blocknr; 1824 1825 if (gfs2_metatype_check(sdp, bh, 1826 (mp_h ? GFS2_METATYPE_IN : 1827 GFS2_METATYPE_DI))) { 1828 ret = -EIO; 1829 goto out; 1830 } 1831 1832 /* 1833 * Below, passing end_aligned as 0 gives us the 1834 * metapointer range excluding the end point: the end 1835 * point is the first metapath we must not deallocate! 1836 */ 1837 1838 metapointer_range(&mp, mp_h, start_list, start_aligned, 1839 end_list, 0 /* end_aligned */, 1840 &start, &end); 1841 ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h], 1842 start, end, 1843 mp_h != ip->i_height - 1, 1844 &btotal); 1845 1846 /* If we hit an error or just swept dinode buffer, 1847 just exit. */ 1848 if (ret || !mp_h) { 1849 state = DEALLOC_DONE; 1850 break; 1851 } 1852 state = DEALLOC_MP_LOWER; 1853 break; 1854 1855 /* lower the metapath strip height */ 1856 case DEALLOC_MP_LOWER: 1857 /* We're done with the current buffer, so release it, 1858 unless it's the dinode buffer. Then back up to the 1859 previous pointer. */ 1860 if (mp_h) { 1861 brelse(mp.mp_bh[mp_h]); 1862 mp.mp_bh[mp_h] = NULL; 1863 } 1864 /* If we can't get any lower in height, we've stripped 1865 off all we can. Next step is to back up and start 1866 stripping the previous level of metadata. */ 1867 if (mp_h == 0) { 1868 strip_h--; 1869 memcpy(mp.mp_list, start_list, sizeof(start_list)); 1870 mp_h = strip_h; 1871 state = DEALLOC_FILL_MP; 1872 break; 1873 } 1874 mp.mp_list[mp_h] = 0; 1875 mp_h--; /* search one metadata height down */ 1876 mp.mp_list[mp_h]++; 1877 if (walk_done(sdp, &mp, mp_h, end_list, end_aligned)) 1878 break; 1879 /* Here we've found a part of the metapath that is not 1880 * allocated. We need to search at that height for the 1881 * next non-null pointer. */ 1882 if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) { 1883 state = DEALLOC_FILL_MP; 1884 mp_h++; 1885 } 1886 /* No more non-null pointers at this height. Back up 1887 to the previous height and try again. */ 1888 break; /* loop around in the same state */ 1889 1890 /* Fill the metapath with buffers to the given height. */ 1891 case DEALLOC_FILL_MP: 1892 /* Fill the buffers out to the current height. */ 1893 ret = fillup_metapath(ip, &mp, mp_h); 1894 if (ret < 0) 1895 goto out; 1896 1897 /* On the first pass, issue read-ahead on metadata. */ 1898 if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) { 1899 unsigned int height = mp.mp_aheight - 1; 1900 1901 /* No read-ahead for data blocks. */ 1902 if (mp.mp_aheight - 1 == strip_h) 1903 height--; 1904 1905 for (; height >= mp.mp_aheight - ret; height--) { 1906 metapointer_range(&mp, height, 1907 start_list, start_aligned, 1908 end_list, end_aligned, 1909 &start, &end); 1910 gfs2_metapath_ra(ip->i_gl, start, end); 1911 } 1912 } 1913 1914 /* If buffers found for the entire strip height */ 1915 if (mp.mp_aheight - 1 == strip_h) { 1916 state = DEALLOC_MP_FULL; 1917 break; 1918 } 1919 if (mp.mp_aheight < ip->i_height) /* We have a partial height */ 1920 mp_h = mp.mp_aheight - 1; 1921 1922 /* If we find a non-null block pointer, crawl a bit 1923 higher up in the metapath and try again, otherwise 1924 we need to look lower for a new starting point. */ 1925 if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) 1926 mp_h++; 1927 else 1928 state = DEALLOC_MP_LOWER; 1929 break; 1930 } 1931 } 1932 1933 if (btotal) { 1934 if (current->journal_info == NULL) { 1935 ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + 1936 RES_QUOTA, 0); 1937 if (ret) 1938 goto out; 1939 down_write(&ip->i_rw_mutex); 1940 } 1941 gfs2_statfs_change(sdp, 0, +btotal, 0); 1942 gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid, 1943 ip->i_inode.i_gid); 1944 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode); 1945 gfs2_trans_add_meta(ip->i_gl, dibh); 1946 gfs2_dinode_out(ip, dibh->b_data); 1947 up_write(&ip->i_rw_mutex); 1948 gfs2_trans_end(sdp); 1949 } 1950 1951 out: 1952 if (gfs2_holder_initialized(&rd_gh)) 1953 gfs2_glock_dq_uninit(&rd_gh); 1954 if (current->journal_info) { 1955 up_write(&ip->i_rw_mutex); 1956 gfs2_trans_end(sdp); 1957 cond_resched(); 1958 } 1959 gfs2_quota_unhold(ip); 1960 out_metapath: 1961 release_metapath(&mp); 1962 return ret; 1963 } 1964 1965 static int trunc_end(struct gfs2_inode *ip) 1966 { 1967 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1968 struct buffer_head *dibh; 1969 int error; 1970 1971 error = gfs2_trans_begin(sdp, RES_DINODE, 0); 1972 if (error) 1973 return error; 1974 1975 down_write(&ip->i_rw_mutex); 1976 1977 error = gfs2_meta_inode_buffer(ip, &dibh); 1978 if (error) 1979 goto out; 1980 1981 if (!i_size_read(&ip->i_inode)) { 1982 ip->i_height = 0; 1983 ip->i_goal = ip->i_no_addr; 1984 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); 1985 gfs2_ordered_del_inode(ip); 1986 } 1987 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode); 1988 ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG; 1989 1990 gfs2_trans_add_meta(ip->i_gl, dibh); 1991 gfs2_dinode_out(ip, dibh->b_data); 1992 brelse(dibh); 1993 1994 out: 1995 up_write(&ip->i_rw_mutex); 1996 gfs2_trans_end(sdp); 1997 return error; 1998 } 1999 2000 /** 2001 * do_shrink - make a file smaller 2002 * @inode: the inode 2003 * @newsize: the size to make the file 2004 * 2005 * Called with an exclusive lock on @inode. The @size must 2006 * be equal to or smaller than the current inode size. 2007 * 2008 * Returns: errno 2009 */ 2010 2011 static int do_shrink(struct inode *inode, u64 newsize) 2012 { 2013 struct gfs2_inode *ip = GFS2_I(inode); 2014 int error; 2015 2016 error = trunc_start(inode, newsize); 2017 if (error < 0) 2018 return error; 2019 if (gfs2_is_stuffed(ip)) 2020 return 0; 2021 2022 error = punch_hole(ip, newsize, 0); 2023 if (error == 0) 2024 error = trunc_end(ip); 2025 2026 return error; 2027 } 2028 2029 void gfs2_trim_blocks(struct inode *inode) 2030 { 2031 int ret; 2032 2033 ret = do_shrink(inode, inode->i_size); 2034 WARN_ON(ret != 0); 2035 } 2036 2037 /** 2038 * do_grow - Touch and update inode size 2039 * @inode: The inode 2040 * @size: The new size 2041 * 2042 * This function updates the timestamps on the inode and 2043 * may also increase the size of the inode. This function 2044 * must not be called with @size any smaller than the current 2045 * inode size. 2046 * 2047 * Although it is not strictly required to unstuff files here, 2048 * earlier versions of GFS2 have a bug in the stuffed file reading 2049 * code which will result in a buffer overrun if the size is larger 2050 * than the max stuffed file size. In order to prevent this from 2051 * occurring, such files are unstuffed, but in other cases we can 2052 * just update the inode size directly. 2053 * 2054 * Returns: 0 on success, or -ve on error 2055 */ 2056 2057 static int do_grow(struct inode *inode, u64 size) 2058 { 2059 struct gfs2_inode *ip = GFS2_I(inode); 2060 struct gfs2_sbd *sdp = GFS2_SB(inode); 2061 struct gfs2_alloc_parms ap = { .target = 1, }; 2062 struct buffer_head *dibh; 2063 int error; 2064 int unstuff = 0; 2065 2066 if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) { 2067 error = gfs2_quota_lock_check(ip, &ap); 2068 if (error) 2069 return error; 2070 2071 error = gfs2_inplace_reserve(ip, &ap); 2072 if (error) 2073 goto do_grow_qunlock; 2074 unstuff = 1; 2075 } 2076 2077 error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT + 2078 (unstuff && 2079 gfs2_is_jdata(ip) ? RES_JDATA : 0) + 2080 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ? 2081 0 : RES_QUOTA), 0); 2082 if (error) 2083 goto do_grow_release; 2084 2085 if (unstuff) { 2086 error = gfs2_unstuff_dinode(ip); 2087 if (error) 2088 goto do_end_trans; 2089 } 2090 2091 error = gfs2_meta_inode_buffer(ip, &dibh); 2092 if (error) 2093 goto do_end_trans; 2094 2095 truncate_setsize(inode, size); 2096 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode); 2097 gfs2_trans_add_meta(ip->i_gl, dibh); 2098 gfs2_dinode_out(ip, dibh->b_data); 2099 brelse(dibh); 2100 2101 do_end_trans: 2102 gfs2_trans_end(sdp); 2103 do_grow_release: 2104 if (unstuff) { 2105 gfs2_inplace_release(ip); 2106 do_grow_qunlock: 2107 gfs2_quota_unlock(ip); 2108 } 2109 return error; 2110 } 2111 2112 /** 2113 * gfs2_setattr_size - make a file a given size 2114 * @inode: the inode 2115 * @newsize: the size to make the file 2116 * 2117 * The file size can grow, shrink, or stay the same size. This 2118 * is called holding i_rwsem and an exclusive glock on the inode 2119 * in question. 2120 * 2121 * Returns: errno 2122 */ 2123 2124 int gfs2_setattr_size(struct inode *inode, u64 newsize) 2125 { 2126 struct gfs2_inode *ip = GFS2_I(inode); 2127 int ret; 2128 2129 BUG_ON(!S_ISREG(inode->i_mode)); 2130 2131 ret = inode_newsize_ok(inode, newsize); 2132 if (ret) 2133 return ret; 2134 2135 inode_dio_wait(inode); 2136 2137 ret = gfs2_qa_get(ip); 2138 if (ret) 2139 goto out; 2140 2141 if (newsize >= inode->i_size) { 2142 ret = do_grow(inode, newsize); 2143 goto out; 2144 } 2145 2146 ret = do_shrink(inode, newsize); 2147 out: 2148 gfs2_rs_delete(ip); 2149 gfs2_qa_put(ip); 2150 return ret; 2151 } 2152 2153 int gfs2_truncatei_resume(struct gfs2_inode *ip) 2154 { 2155 int error; 2156 error = punch_hole(ip, i_size_read(&ip->i_inode), 0); 2157 if (!error) 2158 error = trunc_end(ip); 2159 return error; 2160 } 2161 2162 int gfs2_file_dealloc(struct gfs2_inode *ip) 2163 { 2164 return punch_hole(ip, 0, 0); 2165 } 2166 2167 /** 2168 * gfs2_free_journal_extents - Free cached journal bmap info 2169 * @jd: The journal 2170 * 2171 */ 2172 2173 void gfs2_free_journal_extents(struct gfs2_jdesc *jd) 2174 { 2175 struct gfs2_journal_extent *jext; 2176 2177 while(!list_empty(&jd->extent_list)) { 2178 jext = list_first_entry(&jd->extent_list, struct gfs2_journal_extent, list); 2179 list_del(&jext->list); 2180 kfree(jext); 2181 } 2182 } 2183 2184 /** 2185 * gfs2_add_jextent - Add or merge a new extent to extent cache 2186 * @jd: The journal descriptor 2187 * @lblock: The logical block at start of new extent 2188 * @dblock: The physical block at start of new extent 2189 * @blocks: Size of extent in fs blocks 2190 * 2191 * Returns: 0 on success or -ENOMEM 2192 */ 2193 2194 static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks) 2195 { 2196 struct gfs2_journal_extent *jext; 2197 2198 if (!list_empty(&jd->extent_list)) { 2199 jext = list_last_entry(&jd->extent_list, struct gfs2_journal_extent, list); 2200 if ((jext->dblock + jext->blocks) == dblock) { 2201 jext->blocks += blocks; 2202 return 0; 2203 } 2204 } 2205 2206 jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS); 2207 if (jext == NULL) 2208 return -ENOMEM; 2209 jext->dblock = dblock; 2210 jext->lblock = lblock; 2211 jext->blocks = blocks; 2212 list_add_tail(&jext->list, &jd->extent_list); 2213 jd->nr_extents++; 2214 return 0; 2215 } 2216 2217 /** 2218 * gfs2_map_journal_extents - Cache journal bmap info 2219 * @sdp: The super block 2220 * @jd: The journal to map 2221 * 2222 * Create a reusable "extent" mapping from all logical 2223 * blocks to all physical blocks for the given journal. This will save 2224 * us time when writing journal blocks. Most journals will have only one 2225 * extent that maps all their logical blocks. That's because gfs2.mkfs 2226 * arranges the journal blocks sequentially to maximize performance. 2227 * So the extent would map the first block for the entire file length. 2228 * However, gfs2_jadd can happen while file activity is happening, so 2229 * those journals may not be sequential. Less likely is the case where 2230 * the users created their own journals by mounting the metafs and 2231 * laying it out. But it's still possible. These journals might have 2232 * several extents. 2233 * 2234 * Returns: 0 on success, or error on failure 2235 */ 2236 2237 int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd) 2238 { 2239 u64 lblock = 0; 2240 u64 lblock_stop; 2241 struct gfs2_inode *ip = GFS2_I(jd->jd_inode); 2242 struct buffer_head bh; 2243 unsigned int shift = sdp->sd_sb.sb_bsize_shift; 2244 u64 size; 2245 int rc; 2246 ktime_t start, end; 2247 2248 start = ktime_get(); 2249 lblock_stop = i_size_read(jd->jd_inode) >> shift; 2250 size = (lblock_stop - lblock) << shift; 2251 jd->nr_extents = 0; 2252 WARN_ON(!list_empty(&jd->extent_list)); 2253 2254 do { 2255 bh.b_state = 0; 2256 bh.b_blocknr = 0; 2257 bh.b_size = size; 2258 rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0); 2259 if (rc || !buffer_mapped(&bh)) 2260 goto fail; 2261 rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift); 2262 if (rc) 2263 goto fail; 2264 size -= bh.b_size; 2265 lblock += (bh.b_size >> ip->i_inode.i_blkbits); 2266 } while(size > 0); 2267 2268 end = ktime_get(); 2269 fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid, 2270 jd->nr_extents, ktime_ms_delta(end, start)); 2271 return 0; 2272 2273 fail: 2274 fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n", 2275 rc, jd->jd_jid, 2276 (unsigned long long)(i_size_read(jd->jd_inode) - size), 2277 jd->nr_extents); 2278 fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n", 2279 rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr, 2280 bh.b_state, (unsigned long long)bh.b_size); 2281 gfs2_free_journal_extents(jd); 2282 return rc; 2283 } 2284 2285 /** 2286 * gfs2_write_alloc_required - figure out if a write will require an allocation 2287 * @ip: the file being written to 2288 * @offset: the offset to write to 2289 * @len: the number of bytes being written 2290 * 2291 * Returns: 1 if an alloc is required, 0 otherwise 2292 */ 2293 2294 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, 2295 unsigned int len) 2296 { 2297 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 2298 struct buffer_head bh; 2299 unsigned int shift; 2300 u64 lblock, lblock_stop, size; 2301 u64 end_of_file; 2302 2303 if (!len) 2304 return 0; 2305 2306 if (gfs2_is_stuffed(ip)) { 2307 if (offset + len > gfs2_max_stuffed_size(ip)) 2308 return 1; 2309 return 0; 2310 } 2311 2312 shift = sdp->sd_sb.sb_bsize_shift; 2313 BUG_ON(gfs2_is_dir(ip)); 2314 end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift; 2315 lblock = offset >> shift; 2316 lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; 2317 if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex)) 2318 return 1; 2319 2320 size = (lblock_stop - lblock) << shift; 2321 do { 2322 bh.b_state = 0; 2323 bh.b_size = size; 2324 gfs2_block_map(&ip->i_inode, lblock, &bh, 0); 2325 if (!buffer_mapped(&bh)) 2326 return 1; 2327 size -= bh.b_size; 2328 lblock += (bh.b_size >> ip->i_inode.i_blkbits); 2329 } while(size > 0); 2330 2331 return 0; 2332 } 2333 2334 static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length) 2335 { 2336 struct gfs2_inode *ip = GFS2_I(inode); 2337 struct buffer_head *dibh; 2338 int error; 2339 2340 if (offset >= inode->i_size) 2341 return 0; 2342 if (offset + length > inode->i_size) 2343 length = inode->i_size - offset; 2344 2345 error = gfs2_meta_inode_buffer(ip, &dibh); 2346 if (error) 2347 return error; 2348 gfs2_trans_add_meta(ip->i_gl, dibh); 2349 memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0, 2350 length); 2351 brelse(dibh); 2352 return 0; 2353 } 2354 2355 static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset, 2356 loff_t length) 2357 { 2358 struct gfs2_sbd *sdp = GFS2_SB(inode); 2359 loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize; 2360 int error; 2361 2362 while (length) { 2363 struct gfs2_trans *tr; 2364 loff_t chunk; 2365 unsigned int offs; 2366 2367 chunk = length; 2368 if (chunk > max_chunk) 2369 chunk = max_chunk; 2370 2371 offs = offset & ~PAGE_MASK; 2372 if (offs && chunk > PAGE_SIZE) 2373 chunk = offs + ((chunk - offs) & PAGE_MASK); 2374 2375 truncate_pagecache_range(inode, offset, chunk); 2376 offset += chunk; 2377 length -= chunk; 2378 2379 tr = current->journal_info; 2380 if (!test_bit(TR_TOUCHED, &tr->tr_flags)) 2381 continue; 2382 2383 gfs2_trans_end(sdp); 2384 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES); 2385 if (error) 2386 return error; 2387 } 2388 return 0; 2389 } 2390 2391 int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length) 2392 { 2393 struct inode *inode = file_inode(file); 2394 struct gfs2_inode *ip = GFS2_I(inode); 2395 struct gfs2_sbd *sdp = GFS2_SB(inode); 2396 unsigned int blocksize = i_blocksize(inode); 2397 loff_t start, end; 2398 int error; 2399 2400 if (!gfs2_is_stuffed(ip)) { 2401 unsigned int start_off, end_len; 2402 2403 start_off = offset & (blocksize - 1); 2404 end_len = (offset + length) & (blocksize - 1); 2405 if (start_off) { 2406 unsigned int len = length; 2407 if (length > blocksize - start_off) 2408 len = blocksize - start_off; 2409 error = gfs2_block_zero_range(inode, offset, len); 2410 if (error) 2411 goto out; 2412 if (start_off + length < blocksize) 2413 end_len = 0; 2414 } 2415 if (end_len) { 2416 error = gfs2_block_zero_range(inode, 2417 offset + length - end_len, end_len); 2418 if (error) 2419 goto out; 2420 } 2421 } 2422 2423 start = round_down(offset, blocksize); 2424 end = round_up(offset + length, blocksize) - 1; 2425 error = filemap_write_and_wait_range(inode->i_mapping, start, end); 2426 if (error) 2427 return error; 2428 2429 if (gfs2_is_jdata(ip)) 2430 error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA, 2431 GFS2_JTRUNC_REVOKES); 2432 else 2433 error = gfs2_trans_begin(sdp, RES_DINODE, 0); 2434 if (error) 2435 return error; 2436 2437 if (gfs2_is_stuffed(ip)) { 2438 error = stuffed_zero_range(inode, offset, length); 2439 if (error) 2440 goto out; 2441 } 2442 2443 if (gfs2_is_jdata(ip)) { 2444 BUG_ON(!current->journal_info); 2445 gfs2_journaled_truncate_range(inode, offset, length); 2446 } else 2447 truncate_pagecache_range(inode, offset, offset + length - 1); 2448 2449 file_update_time(file); 2450 mark_inode_dirty(inode); 2451 2452 if (current->journal_info) 2453 gfs2_trans_end(sdp); 2454 2455 if (!gfs2_is_stuffed(ip)) 2456 error = punch_hole(ip, offset, length); 2457 2458 out: 2459 if (current->journal_info) 2460 gfs2_trans_end(sdp); 2461 return error; 2462 } 2463 2464 static int gfs2_map_blocks(struct iomap_writepage_ctx *wpc, struct inode *inode, 2465 loff_t offset) 2466 { 2467 int ret; 2468 2469 if (WARN_ON_ONCE(gfs2_is_stuffed(GFS2_I(inode)))) 2470 return -EIO; 2471 2472 if (offset >= wpc->iomap.offset && 2473 offset < wpc->iomap.offset + wpc->iomap.length) 2474 return 0; 2475 2476 memset(&wpc->iomap, 0, sizeof(wpc->iomap)); 2477 ret = gfs2_iomap_get(inode, offset, INT_MAX, &wpc->iomap); 2478 return ret; 2479 } 2480 2481 const struct iomap_writeback_ops gfs2_writeback_ops = { 2482 .map_blocks = gfs2_map_blocks, 2483 }; 2484