1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. 5 */ 6 7 #include <linux/spinlock.h> 8 #include <linux/completion.h> 9 #include <linux/buffer_head.h> 10 #include <linux/blkdev.h> 11 #include <linux/gfs2_ondisk.h> 12 #include <linux/crc32.h> 13 #include <linux/iomap.h> 14 #include <linux/ktime.h> 15 16 #include "gfs2.h" 17 #include "incore.h" 18 #include "bmap.h" 19 #include "glock.h" 20 #include "inode.h" 21 #include "meta_io.h" 22 #include "quota.h" 23 #include "rgrp.h" 24 #include "log.h" 25 #include "super.h" 26 #include "trans.h" 27 #include "dir.h" 28 #include "util.h" 29 #include "aops.h" 30 #include "trace_gfs2.h" 31 32 /* This doesn't need to be that large as max 64 bit pointers in a 4k 33 * block is 512, so __u16 is fine for that. It saves stack space to 34 * keep it small. 35 */ 36 struct metapath { 37 struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT]; 38 __u16 mp_list[GFS2_MAX_META_HEIGHT]; 39 int mp_fheight; /* find_metapath height */ 40 int mp_aheight; /* actual height (lookup height) */ 41 }; 42 43 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length); 44 45 /** 46 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page 47 * @ip: the inode 48 * @dibh: the dinode buffer 49 * @block: the block number that was allocated 50 * @page: The (optional) page. This is looked up if @page is NULL 51 * 52 * Returns: errno 53 */ 54 55 static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh, 56 u64 block, struct page *page) 57 { 58 struct inode *inode = &ip->i_inode; 59 struct buffer_head *bh; 60 int release = 0; 61 62 if (!page || page->index) { 63 page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS); 64 if (!page) 65 return -ENOMEM; 66 release = 1; 67 } 68 69 if (!PageUptodate(page)) { 70 void *kaddr = kmap(page); 71 u64 dsize = i_size_read(inode); 72 73 if (dsize > gfs2_max_stuffed_size(ip)) 74 dsize = gfs2_max_stuffed_size(ip); 75 76 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); 77 memset(kaddr + dsize, 0, PAGE_SIZE - dsize); 78 kunmap(page); 79 80 SetPageUptodate(page); 81 } 82 83 if (!page_has_buffers(page)) 84 create_empty_buffers(page, BIT(inode->i_blkbits), 85 BIT(BH_Uptodate)); 86 87 bh = page_buffers(page); 88 89 if (!buffer_mapped(bh)) 90 map_bh(bh, inode->i_sb, block); 91 92 set_buffer_uptodate(bh); 93 if (gfs2_is_jdata(ip)) 94 gfs2_trans_add_data(ip->i_gl, bh); 95 else { 96 mark_buffer_dirty(bh); 97 gfs2_ordered_add_inode(ip); 98 } 99 100 if (release) { 101 unlock_page(page); 102 put_page(page); 103 } 104 105 return 0; 106 } 107 108 /** 109 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big 110 * @ip: The GFS2 inode to unstuff 111 * @page: The (optional) page. This is looked up if the @page is NULL 112 * 113 * This routine unstuffs a dinode and returns it to a "normal" state such 114 * that the height can be grown in the traditional way. 115 * 116 * Returns: errno 117 */ 118 119 int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page) 120 { 121 struct buffer_head *bh, *dibh; 122 struct gfs2_dinode *di; 123 u64 block = 0; 124 int isdir = gfs2_is_dir(ip); 125 int error; 126 127 down_write(&ip->i_rw_mutex); 128 129 error = gfs2_meta_inode_buffer(ip, &dibh); 130 if (error) 131 goto out; 132 133 if (i_size_read(&ip->i_inode)) { 134 /* Get a free block, fill it with the stuffed data, 135 and write it out to disk */ 136 137 unsigned int n = 1; 138 error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL); 139 if (error) 140 goto out_brelse; 141 if (isdir) { 142 gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1); 143 error = gfs2_dir_get_new_buffer(ip, block, &bh); 144 if (error) 145 goto out_brelse; 146 gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header), 147 dibh, sizeof(struct gfs2_dinode)); 148 brelse(bh); 149 } else { 150 error = gfs2_unstuffer_page(ip, dibh, block, page); 151 if (error) 152 goto out_brelse; 153 } 154 } 155 156 /* Set up the pointer to the new block */ 157 158 gfs2_trans_add_meta(ip->i_gl, dibh); 159 di = (struct gfs2_dinode *)dibh->b_data; 160 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); 161 162 if (i_size_read(&ip->i_inode)) { 163 *(__be64 *)(di + 1) = cpu_to_be64(block); 164 gfs2_add_inode_blocks(&ip->i_inode, 1); 165 di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); 166 } 167 168 ip->i_height = 1; 169 di->di_height = cpu_to_be16(1); 170 171 out_brelse: 172 brelse(dibh); 173 out: 174 up_write(&ip->i_rw_mutex); 175 return error; 176 } 177 178 179 /** 180 * find_metapath - Find path through the metadata tree 181 * @sdp: The superblock 182 * @block: The disk block to look up 183 * @mp: The metapath to return the result in 184 * @height: The pre-calculated height of the metadata tree 185 * 186 * This routine returns a struct metapath structure that defines a path 187 * through the metadata of inode "ip" to get to block "block". 188 * 189 * Example: 190 * Given: "ip" is a height 3 file, "offset" is 101342453, and this is a 191 * filesystem with a blocksize of 4096. 192 * 193 * find_metapath() would return a struct metapath structure set to: 194 * mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165. 195 * 196 * That means that in order to get to the block containing the byte at 197 * offset 101342453, we would load the indirect block pointed to by pointer 198 * 0 in the dinode. We would then load the indirect block pointed to by 199 * pointer 48 in that indirect block. We would then load the data block 200 * pointed to by pointer 165 in that indirect block. 201 * 202 * ---------------------------------------- 203 * | Dinode | | 204 * | | 4| 205 * | |0 1 2 3 4 5 9| 206 * | | 6| 207 * ---------------------------------------- 208 * | 209 * | 210 * V 211 * ---------------------------------------- 212 * | Indirect Block | 213 * | 5| 214 * | 4 4 4 4 4 5 5 1| 215 * |0 5 6 7 8 9 0 1 2| 216 * ---------------------------------------- 217 * | 218 * | 219 * V 220 * ---------------------------------------- 221 * | Indirect Block | 222 * | 1 1 1 1 1 5| 223 * | 6 6 6 6 6 1| 224 * |0 3 4 5 6 7 2| 225 * ---------------------------------------- 226 * | 227 * | 228 * V 229 * ---------------------------------------- 230 * | Data block containing offset | 231 * | 101342453 | 232 * | | 233 * | | 234 * ---------------------------------------- 235 * 236 */ 237 238 static void find_metapath(const struct gfs2_sbd *sdp, u64 block, 239 struct metapath *mp, unsigned int height) 240 { 241 unsigned int i; 242 243 mp->mp_fheight = height; 244 for (i = height; i--;) 245 mp->mp_list[i] = do_div(block, sdp->sd_inptrs); 246 } 247 248 static inline unsigned int metapath_branch_start(const struct metapath *mp) 249 { 250 if (mp->mp_list[0] == 0) 251 return 2; 252 return 1; 253 } 254 255 /** 256 * metaptr1 - Return the first possible metadata pointer in a metapath buffer 257 * @height: The metadata height (0 = dinode) 258 * @mp: The metapath 259 */ 260 static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp) 261 { 262 struct buffer_head *bh = mp->mp_bh[height]; 263 if (height == 0) 264 return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode))); 265 return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header))); 266 } 267 268 /** 269 * metapointer - Return pointer to start of metadata in a buffer 270 * @height: The metadata height (0 = dinode) 271 * @mp: The metapath 272 * 273 * Return a pointer to the block number of the next height of the metadata 274 * tree given a buffer containing the pointer to the current height of the 275 * metadata tree. 276 */ 277 278 static inline __be64 *metapointer(unsigned int height, const struct metapath *mp) 279 { 280 __be64 *p = metaptr1(height, mp); 281 return p + mp->mp_list[height]; 282 } 283 284 static inline const __be64 *metaend(unsigned int height, const struct metapath *mp) 285 { 286 const struct buffer_head *bh = mp->mp_bh[height]; 287 return (const __be64 *)(bh->b_data + bh->b_size); 288 } 289 290 static void clone_metapath(struct metapath *clone, struct metapath *mp) 291 { 292 unsigned int hgt; 293 294 *clone = *mp; 295 for (hgt = 0; hgt < mp->mp_aheight; hgt++) 296 get_bh(clone->mp_bh[hgt]); 297 } 298 299 static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end) 300 { 301 const __be64 *t; 302 303 for (t = start; t < end; t++) { 304 struct buffer_head *rabh; 305 306 if (!*t) 307 continue; 308 309 rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE); 310 if (trylock_buffer(rabh)) { 311 if (!buffer_uptodate(rabh)) { 312 rabh->b_end_io = end_buffer_read_sync; 313 submit_bh(REQ_OP_READ, 314 REQ_RAHEAD | REQ_META | REQ_PRIO, 315 rabh); 316 continue; 317 } 318 unlock_buffer(rabh); 319 } 320 brelse(rabh); 321 } 322 } 323 324 static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, 325 unsigned int x, unsigned int h) 326 { 327 for (; x < h; x++) { 328 __be64 *ptr = metapointer(x, mp); 329 u64 dblock = be64_to_cpu(*ptr); 330 int ret; 331 332 if (!dblock) 333 break; 334 ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, &mp->mp_bh[x + 1]); 335 if (ret) 336 return ret; 337 } 338 mp->mp_aheight = x + 1; 339 return 0; 340 } 341 342 /** 343 * lookup_metapath - Walk the metadata tree to a specific point 344 * @ip: The inode 345 * @mp: The metapath 346 * 347 * Assumes that the inode's buffer has already been looked up and 348 * hooked onto mp->mp_bh[0] and that the metapath has been initialised 349 * by find_metapath(). 350 * 351 * If this function encounters part of the tree which has not been 352 * allocated, it returns the current height of the tree at the point 353 * at which it found the unallocated block. Blocks which are found are 354 * added to the mp->mp_bh[] list. 355 * 356 * Returns: error 357 */ 358 359 static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp) 360 { 361 return __fillup_metapath(ip, mp, 0, ip->i_height - 1); 362 } 363 364 /** 365 * fillup_metapath - fill up buffers for the metadata path to a specific height 366 * @ip: The inode 367 * @mp: The metapath 368 * @h: The height to which it should be mapped 369 * 370 * Similar to lookup_metapath, but does lookups for a range of heights 371 * 372 * Returns: error or the number of buffers filled 373 */ 374 375 static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h) 376 { 377 unsigned int x = 0; 378 int ret; 379 380 if (h) { 381 /* find the first buffer we need to look up. */ 382 for (x = h - 1; x > 0; x--) { 383 if (mp->mp_bh[x]) 384 break; 385 } 386 } 387 ret = __fillup_metapath(ip, mp, x, h); 388 if (ret) 389 return ret; 390 return mp->mp_aheight - x - 1; 391 } 392 393 static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp) 394 { 395 sector_t factor = 1, block = 0; 396 int hgt; 397 398 for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) { 399 if (hgt < mp->mp_aheight) 400 block += mp->mp_list[hgt] * factor; 401 factor *= sdp->sd_inptrs; 402 } 403 return block; 404 } 405 406 static void release_metapath(struct metapath *mp) 407 { 408 int i; 409 410 for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) { 411 if (mp->mp_bh[i] == NULL) 412 break; 413 brelse(mp->mp_bh[i]); 414 mp->mp_bh[i] = NULL; 415 } 416 } 417 418 /** 419 * gfs2_extent_length - Returns length of an extent of blocks 420 * @bh: The metadata block 421 * @ptr: Current position in @bh 422 * @limit: Max extent length to return 423 * @eob: Set to 1 if we hit "end of block" 424 * 425 * Returns: The length of the extent (minimum of one block) 426 */ 427 428 static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob) 429 { 430 const __be64 *end = (__be64 *)(bh->b_data + bh->b_size); 431 const __be64 *first = ptr; 432 u64 d = be64_to_cpu(*ptr); 433 434 *eob = 0; 435 do { 436 ptr++; 437 if (ptr >= end) 438 break; 439 d++; 440 } while(be64_to_cpu(*ptr) == d); 441 if (ptr >= end) 442 *eob = 1; 443 return ptr - first; 444 } 445 446 enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE }; 447 448 /* 449 * gfs2_metadata_walker - walk an indirect block 450 * @mp: Metapath to indirect block 451 * @ptrs: Number of pointers to look at 452 * 453 * When returning WALK_FOLLOW, the walker must update @mp to point at the right 454 * indirect block to follow. 455 */ 456 typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp, 457 unsigned int ptrs); 458 459 /* 460 * gfs2_walk_metadata - walk a tree of indirect blocks 461 * @inode: The inode 462 * @mp: Starting point of walk 463 * @max_len: Maximum number of blocks to walk 464 * @walker: Called during the walk 465 * 466 * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or 467 * past the end of metadata, and a negative error code otherwise. 468 */ 469 470 static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp, 471 u64 max_len, gfs2_metadata_walker walker) 472 { 473 struct gfs2_inode *ip = GFS2_I(inode); 474 struct gfs2_sbd *sdp = GFS2_SB(inode); 475 u64 factor = 1; 476 unsigned int hgt; 477 int ret; 478 479 /* 480 * The walk starts in the lowest allocated indirect block, which may be 481 * before the position indicated by @mp. Adjust @max_len accordingly 482 * to avoid a short walk. 483 */ 484 for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) { 485 max_len += mp->mp_list[hgt] * factor; 486 mp->mp_list[hgt] = 0; 487 factor *= sdp->sd_inptrs; 488 } 489 490 for (;;) { 491 u16 start = mp->mp_list[hgt]; 492 enum walker_status status; 493 unsigned int ptrs; 494 u64 len; 495 496 /* Walk indirect block. */ 497 ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start; 498 len = ptrs * factor; 499 if (len > max_len) 500 ptrs = DIV_ROUND_UP_ULL(max_len, factor); 501 status = walker(mp, ptrs); 502 switch (status) { 503 case WALK_STOP: 504 return 1; 505 case WALK_FOLLOW: 506 BUG_ON(mp->mp_aheight == mp->mp_fheight); 507 ptrs = mp->mp_list[hgt] - start; 508 len = ptrs * factor; 509 break; 510 case WALK_CONTINUE: 511 break; 512 } 513 if (len >= max_len) 514 break; 515 max_len -= len; 516 if (status == WALK_FOLLOW) 517 goto fill_up_metapath; 518 519 lower_metapath: 520 /* Decrease height of metapath. */ 521 brelse(mp->mp_bh[hgt]); 522 mp->mp_bh[hgt] = NULL; 523 mp->mp_list[hgt] = 0; 524 if (!hgt) 525 break; 526 hgt--; 527 factor *= sdp->sd_inptrs; 528 529 /* Advance in metadata tree. */ 530 (mp->mp_list[hgt])++; 531 if (mp->mp_list[hgt] >= sdp->sd_inptrs) { 532 if (!hgt) 533 break; 534 goto lower_metapath; 535 } 536 537 fill_up_metapath: 538 /* Increase height of metapath. */ 539 ret = fillup_metapath(ip, mp, ip->i_height - 1); 540 if (ret < 0) 541 return ret; 542 hgt += ret; 543 for (; ret; ret--) 544 do_div(factor, sdp->sd_inptrs); 545 mp->mp_aheight = hgt + 1; 546 } 547 return 0; 548 } 549 550 static enum walker_status gfs2_hole_walker(struct metapath *mp, 551 unsigned int ptrs) 552 { 553 const __be64 *start, *ptr, *end; 554 unsigned int hgt; 555 556 hgt = mp->mp_aheight - 1; 557 start = metapointer(hgt, mp); 558 end = start + ptrs; 559 560 for (ptr = start; ptr < end; ptr++) { 561 if (*ptr) { 562 mp->mp_list[hgt] += ptr - start; 563 if (mp->mp_aheight == mp->mp_fheight) 564 return WALK_STOP; 565 return WALK_FOLLOW; 566 } 567 } 568 return WALK_CONTINUE; 569 } 570 571 /** 572 * gfs2_hole_size - figure out the size of a hole 573 * @inode: The inode 574 * @lblock: The logical starting block number 575 * @len: How far to look (in blocks) 576 * @mp: The metapath at lblock 577 * @iomap: The iomap to store the hole size in 578 * 579 * This function modifies @mp. 580 * 581 * Returns: errno on error 582 */ 583 static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len, 584 struct metapath *mp, struct iomap *iomap) 585 { 586 struct metapath clone; 587 u64 hole_size; 588 int ret; 589 590 clone_metapath(&clone, mp); 591 ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker); 592 if (ret < 0) 593 goto out; 594 595 if (ret == 1) 596 hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock; 597 else 598 hole_size = len; 599 iomap->length = hole_size << inode->i_blkbits; 600 ret = 0; 601 602 out: 603 release_metapath(&clone); 604 return ret; 605 } 606 607 static inline __be64 *gfs2_indirect_init(struct metapath *mp, 608 struct gfs2_glock *gl, unsigned int i, 609 unsigned offset, u64 bn) 610 { 611 __be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data + 612 ((i > 1) ? sizeof(struct gfs2_meta_header) : 613 sizeof(struct gfs2_dinode))); 614 BUG_ON(i < 1); 615 BUG_ON(mp->mp_bh[i] != NULL); 616 mp->mp_bh[i] = gfs2_meta_new(gl, bn); 617 gfs2_trans_add_meta(gl, mp->mp_bh[i]); 618 gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN); 619 gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header)); 620 ptr += offset; 621 *ptr = cpu_to_be64(bn); 622 return ptr; 623 } 624 625 enum alloc_state { 626 ALLOC_DATA = 0, 627 ALLOC_GROW_DEPTH = 1, 628 ALLOC_GROW_HEIGHT = 2, 629 /* ALLOC_UNSTUFF = 3, TBD and rather complicated */ 630 }; 631 632 /** 633 * gfs2_iomap_alloc - Build a metadata tree of the requested height 634 * @inode: The GFS2 inode 635 * @iomap: The iomap structure 636 * @mp: The metapath, with proper height information calculated 637 * 638 * In this routine we may have to alloc: 639 * i) Indirect blocks to grow the metadata tree height 640 * ii) Indirect blocks to fill in lower part of the metadata tree 641 * iii) Data blocks 642 * 643 * This function is called after gfs2_iomap_get, which works out the 644 * total number of blocks which we need via gfs2_alloc_size. 645 * 646 * We then do the actual allocation asking for an extent at a time (if 647 * enough contiguous free blocks are available, there will only be one 648 * allocation request per call) and uses the state machine to initialise 649 * the blocks in order. 650 * 651 * Right now, this function will allocate at most one indirect block 652 * worth of data -- with a default block size of 4K, that's slightly 653 * less than 2M. If this limitation is ever removed to allow huge 654 * allocations, we would probably still want to limit the iomap size we 655 * return to avoid stalling other tasks during huge writes; the next 656 * iomap iteration would then find the blocks already allocated. 657 * 658 * Returns: errno on error 659 */ 660 661 static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap, 662 struct metapath *mp) 663 { 664 struct gfs2_inode *ip = GFS2_I(inode); 665 struct gfs2_sbd *sdp = GFS2_SB(inode); 666 struct buffer_head *dibh = mp->mp_bh[0]; 667 u64 bn; 668 unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0; 669 size_t dblks = iomap->length >> inode->i_blkbits; 670 const unsigned end_of_metadata = mp->mp_fheight - 1; 671 int ret; 672 enum alloc_state state; 673 __be64 *ptr; 674 __be64 zero_bn = 0; 675 676 BUG_ON(mp->mp_aheight < 1); 677 BUG_ON(dibh == NULL); 678 BUG_ON(dblks < 1); 679 680 gfs2_trans_add_meta(ip->i_gl, dibh); 681 682 down_write(&ip->i_rw_mutex); 683 684 if (mp->mp_fheight == mp->mp_aheight) { 685 /* Bottom indirect block exists */ 686 state = ALLOC_DATA; 687 } else { 688 /* Need to allocate indirect blocks */ 689 if (mp->mp_fheight == ip->i_height) { 690 /* Writing into existing tree, extend tree down */ 691 iblks = mp->mp_fheight - mp->mp_aheight; 692 state = ALLOC_GROW_DEPTH; 693 } else { 694 /* Building up tree height */ 695 state = ALLOC_GROW_HEIGHT; 696 iblks = mp->mp_fheight - ip->i_height; 697 branch_start = metapath_branch_start(mp); 698 iblks += (mp->mp_fheight - branch_start); 699 } 700 } 701 702 /* start of the second part of the function (state machine) */ 703 704 blks = dblks + iblks; 705 i = mp->mp_aheight; 706 do { 707 n = blks - alloced; 708 ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL); 709 if (ret) 710 goto out; 711 alloced += n; 712 if (state != ALLOC_DATA || gfs2_is_jdata(ip)) 713 gfs2_trans_remove_revoke(sdp, bn, n); 714 switch (state) { 715 /* Growing height of tree */ 716 case ALLOC_GROW_HEIGHT: 717 if (i == 1) { 718 ptr = (__be64 *)(dibh->b_data + 719 sizeof(struct gfs2_dinode)); 720 zero_bn = *ptr; 721 } 722 for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0; 723 i++, n--) 724 gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++); 725 if (i - 1 == mp->mp_fheight - ip->i_height) { 726 i--; 727 gfs2_buffer_copy_tail(mp->mp_bh[i], 728 sizeof(struct gfs2_meta_header), 729 dibh, sizeof(struct gfs2_dinode)); 730 gfs2_buffer_clear_tail(dibh, 731 sizeof(struct gfs2_dinode) + 732 sizeof(__be64)); 733 ptr = (__be64 *)(mp->mp_bh[i]->b_data + 734 sizeof(struct gfs2_meta_header)); 735 *ptr = zero_bn; 736 state = ALLOC_GROW_DEPTH; 737 for(i = branch_start; i < mp->mp_fheight; i++) { 738 if (mp->mp_bh[i] == NULL) 739 break; 740 brelse(mp->mp_bh[i]); 741 mp->mp_bh[i] = NULL; 742 } 743 i = branch_start; 744 } 745 if (n == 0) 746 break; 747 /* fall through - To branching from existing tree */ 748 case ALLOC_GROW_DEPTH: 749 if (i > 1 && i < mp->mp_fheight) 750 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]); 751 for (; i < mp->mp_fheight && n > 0; i++, n--) 752 gfs2_indirect_init(mp, ip->i_gl, i, 753 mp->mp_list[i-1], bn++); 754 if (i == mp->mp_fheight) 755 state = ALLOC_DATA; 756 if (n == 0) 757 break; 758 /* fall through - To tree complete, adding data blocks */ 759 case ALLOC_DATA: 760 BUG_ON(n > dblks); 761 BUG_ON(mp->mp_bh[end_of_metadata] == NULL); 762 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]); 763 dblks = n; 764 ptr = metapointer(end_of_metadata, mp); 765 iomap->addr = bn << inode->i_blkbits; 766 iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW; 767 while (n-- > 0) 768 *ptr++ = cpu_to_be64(bn++); 769 break; 770 } 771 } while (iomap->addr == IOMAP_NULL_ADDR); 772 773 iomap->type = IOMAP_MAPPED; 774 iomap->length = (u64)dblks << inode->i_blkbits; 775 ip->i_height = mp->mp_fheight; 776 gfs2_add_inode_blocks(&ip->i_inode, alloced); 777 gfs2_dinode_out(ip, dibh->b_data); 778 out: 779 up_write(&ip->i_rw_mutex); 780 return ret; 781 } 782 783 #define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE 784 785 /** 786 * gfs2_alloc_size - Compute the maximum allocation size 787 * @inode: The inode 788 * @mp: The metapath 789 * @size: Requested size in blocks 790 * 791 * Compute the maximum size of the next allocation at @mp. 792 * 793 * Returns: size in blocks 794 */ 795 static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size) 796 { 797 struct gfs2_inode *ip = GFS2_I(inode); 798 struct gfs2_sbd *sdp = GFS2_SB(inode); 799 const __be64 *first, *ptr, *end; 800 801 /* 802 * For writes to stuffed files, this function is called twice via 803 * gfs2_iomap_get, before and after unstuffing. The size we return the 804 * first time needs to be large enough to get the reservation and 805 * allocation sizes right. The size we return the second time must 806 * be exact or else gfs2_iomap_alloc won't do the right thing. 807 */ 808 809 if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) { 810 unsigned int maxsize = mp->mp_fheight > 1 ? 811 sdp->sd_inptrs : sdp->sd_diptrs; 812 maxsize -= mp->mp_list[mp->mp_fheight - 1]; 813 if (size > maxsize) 814 size = maxsize; 815 return size; 816 } 817 818 first = metapointer(ip->i_height - 1, mp); 819 end = metaend(ip->i_height - 1, mp); 820 if (end - first > size) 821 end = first + size; 822 for (ptr = first; ptr < end; ptr++) { 823 if (*ptr) 824 break; 825 } 826 return ptr - first; 827 } 828 829 /** 830 * gfs2_iomap_get - Map blocks from an inode to disk blocks 831 * @inode: The inode 832 * @pos: Starting position in bytes 833 * @length: Length to map, in bytes 834 * @flags: iomap flags 835 * @iomap: The iomap structure 836 * @mp: The metapath 837 * 838 * Returns: errno 839 */ 840 static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length, 841 unsigned flags, struct iomap *iomap, 842 struct metapath *mp) 843 { 844 struct gfs2_inode *ip = GFS2_I(inode); 845 struct gfs2_sbd *sdp = GFS2_SB(inode); 846 loff_t size = i_size_read(inode); 847 __be64 *ptr; 848 sector_t lblock; 849 sector_t lblock_stop; 850 int ret; 851 int eob; 852 u64 len; 853 struct buffer_head *dibh = NULL, *bh; 854 u8 height; 855 856 if (!length) 857 return -EINVAL; 858 859 down_read(&ip->i_rw_mutex); 860 861 ret = gfs2_meta_inode_buffer(ip, &dibh); 862 if (ret) 863 goto unlock; 864 mp->mp_bh[0] = dibh; 865 866 if (gfs2_is_stuffed(ip)) { 867 if (flags & IOMAP_WRITE) { 868 loff_t max_size = gfs2_max_stuffed_size(ip); 869 870 if (pos + length > max_size) 871 goto unstuff; 872 iomap->length = max_size; 873 } else { 874 if (pos >= size) { 875 if (flags & IOMAP_REPORT) { 876 ret = -ENOENT; 877 goto unlock; 878 } else { 879 /* report a hole */ 880 iomap->offset = pos; 881 iomap->length = length; 882 goto do_alloc; 883 } 884 } 885 iomap->length = size; 886 } 887 iomap->addr = (ip->i_no_addr << inode->i_blkbits) + 888 sizeof(struct gfs2_dinode); 889 iomap->type = IOMAP_INLINE; 890 iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode); 891 goto out; 892 } 893 894 unstuff: 895 lblock = pos >> inode->i_blkbits; 896 iomap->offset = lblock << inode->i_blkbits; 897 lblock_stop = (pos + length - 1) >> inode->i_blkbits; 898 len = lblock_stop - lblock + 1; 899 iomap->length = len << inode->i_blkbits; 900 901 height = ip->i_height; 902 while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height]) 903 height++; 904 find_metapath(sdp, lblock, mp, height); 905 if (height > ip->i_height || gfs2_is_stuffed(ip)) 906 goto do_alloc; 907 908 ret = lookup_metapath(ip, mp); 909 if (ret) 910 goto unlock; 911 912 if (mp->mp_aheight != ip->i_height) 913 goto do_alloc; 914 915 ptr = metapointer(ip->i_height - 1, mp); 916 if (*ptr == 0) 917 goto do_alloc; 918 919 bh = mp->mp_bh[ip->i_height - 1]; 920 len = gfs2_extent_length(bh, ptr, len, &eob); 921 922 iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits; 923 iomap->length = len << inode->i_blkbits; 924 iomap->type = IOMAP_MAPPED; 925 iomap->flags |= IOMAP_F_MERGED; 926 if (eob) 927 iomap->flags |= IOMAP_F_GFS2_BOUNDARY; 928 929 out: 930 iomap->bdev = inode->i_sb->s_bdev; 931 unlock: 932 up_read(&ip->i_rw_mutex); 933 return ret; 934 935 do_alloc: 936 iomap->addr = IOMAP_NULL_ADDR; 937 iomap->type = IOMAP_HOLE; 938 if (flags & IOMAP_REPORT) { 939 if (pos >= size) 940 ret = -ENOENT; 941 else if (height == ip->i_height) 942 ret = gfs2_hole_size(inode, lblock, len, mp, iomap); 943 else 944 iomap->length = size - pos; 945 } else if (flags & IOMAP_WRITE) { 946 u64 alloc_size; 947 948 if (flags & IOMAP_DIRECT) 949 goto out; /* (see gfs2_file_direct_write) */ 950 951 len = gfs2_alloc_size(inode, mp, len); 952 alloc_size = len << inode->i_blkbits; 953 if (alloc_size < iomap->length) 954 iomap->length = alloc_size; 955 } else { 956 if (pos < size && height == ip->i_height) 957 ret = gfs2_hole_size(inode, lblock, len, mp, iomap); 958 } 959 goto out; 960 } 961 962 /** 963 * gfs2_lblk_to_dblk - convert logical block to disk block 964 * @inode: the inode of the file we're mapping 965 * @lblock: the block relative to the start of the file 966 * @dblock: the returned dblock, if no error 967 * 968 * This function maps a single block from a file logical block (relative to 969 * the start of the file) to a file system absolute block using iomap. 970 * 971 * Returns: the absolute file system block, or an error 972 */ 973 int gfs2_lblk_to_dblk(struct inode *inode, u32 lblock, u64 *dblock) 974 { 975 struct iomap iomap = { }; 976 struct metapath mp = { .mp_aheight = 1, }; 977 loff_t pos = (loff_t)lblock << inode->i_blkbits; 978 int ret; 979 980 ret = gfs2_iomap_get(inode, pos, i_blocksize(inode), 0, &iomap, &mp); 981 release_metapath(&mp); 982 if (ret == 0) 983 *dblock = iomap.addr >> inode->i_blkbits; 984 985 return ret; 986 } 987 988 static int gfs2_write_lock(struct inode *inode) 989 { 990 struct gfs2_inode *ip = GFS2_I(inode); 991 struct gfs2_sbd *sdp = GFS2_SB(inode); 992 int error; 993 994 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh); 995 error = gfs2_glock_nq(&ip->i_gh); 996 if (error) 997 goto out_uninit; 998 if (&ip->i_inode == sdp->sd_rindex) { 999 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 1000 1001 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 1002 GL_NOCACHE, &m_ip->i_gh); 1003 if (error) 1004 goto out_unlock; 1005 } 1006 return 0; 1007 1008 out_unlock: 1009 gfs2_glock_dq(&ip->i_gh); 1010 out_uninit: 1011 gfs2_holder_uninit(&ip->i_gh); 1012 return error; 1013 } 1014 1015 static void gfs2_write_unlock(struct inode *inode) 1016 { 1017 struct gfs2_inode *ip = GFS2_I(inode); 1018 struct gfs2_sbd *sdp = GFS2_SB(inode); 1019 1020 if (&ip->i_inode == sdp->sd_rindex) { 1021 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); 1022 1023 gfs2_glock_dq_uninit(&m_ip->i_gh); 1024 } 1025 gfs2_glock_dq_uninit(&ip->i_gh); 1026 } 1027 1028 static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos, 1029 unsigned len, struct iomap *iomap) 1030 { 1031 unsigned int blockmask = i_blocksize(inode) - 1; 1032 struct gfs2_sbd *sdp = GFS2_SB(inode); 1033 unsigned int blocks; 1034 1035 blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits; 1036 return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0); 1037 } 1038 1039 static void gfs2_iomap_page_done(struct inode *inode, loff_t pos, 1040 unsigned copied, struct page *page, 1041 struct iomap *iomap) 1042 { 1043 struct gfs2_trans *tr = current->journal_info; 1044 struct gfs2_inode *ip = GFS2_I(inode); 1045 struct gfs2_sbd *sdp = GFS2_SB(inode); 1046 1047 if (page && !gfs2_is_stuffed(ip)) 1048 gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied); 1049 1050 if (tr->tr_num_buf_new) 1051 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 1052 1053 gfs2_trans_end(sdp); 1054 } 1055 1056 static const struct iomap_page_ops gfs2_iomap_page_ops = { 1057 .page_prepare = gfs2_iomap_page_prepare, 1058 .page_done = gfs2_iomap_page_done, 1059 }; 1060 1061 static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos, 1062 loff_t length, unsigned flags, 1063 struct iomap *iomap, 1064 struct metapath *mp) 1065 { 1066 struct gfs2_inode *ip = GFS2_I(inode); 1067 struct gfs2_sbd *sdp = GFS2_SB(inode); 1068 unsigned int data_blocks = 0, ind_blocks = 0, rblocks; 1069 bool unstuff, alloc_required; 1070 int ret; 1071 1072 ret = gfs2_write_lock(inode); 1073 if (ret) 1074 return ret; 1075 1076 unstuff = gfs2_is_stuffed(ip) && 1077 pos + length > gfs2_max_stuffed_size(ip); 1078 1079 ret = gfs2_iomap_get(inode, pos, length, flags, iomap, mp); 1080 if (ret) 1081 goto out_unlock; 1082 1083 alloc_required = unstuff || iomap->type == IOMAP_HOLE; 1084 1085 if (alloc_required || gfs2_is_jdata(ip)) 1086 gfs2_write_calc_reserv(ip, iomap->length, &data_blocks, 1087 &ind_blocks); 1088 1089 if (alloc_required) { 1090 struct gfs2_alloc_parms ap = { 1091 .target = data_blocks + ind_blocks 1092 }; 1093 1094 ret = gfs2_quota_lock_check(ip, &ap); 1095 if (ret) 1096 goto out_unlock; 1097 1098 ret = gfs2_inplace_reserve(ip, &ap); 1099 if (ret) 1100 goto out_qunlock; 1101 } 1102 1103 rblocks = RES_DINODE + ind_blocks; 1104 if (gfs2_is_jdata(ip)) 1105 rblocks += data_blocks; 1106 if (ind_blocks || data_blocks) 1107 rblocks += RES_STATFS + RES_QUOTA; 1108 if (inode == sdp->sd_rindex) 1109 rblocks += 2 * RES_STATFS; 1110 if (alloc_required) 1111 rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks); 1112 1113 if (unstuff || iomap->type == IOMAP_HOLE) { 1114 struct gfs2_trans *tr; 1115 1116 ret = gfs2_trans_begin(sdp, rblocks, 1117 iomap->length >> inode->i_blkbits); 1118 if (ret) 1119 goto out_trans_fail; 1120 1121 if (unstuff) { 1122 ret = gfs2_unstuff_dinode(ip, NULL); 1123 if (ret) 1124 goto out_trans_end; 1125 release_metapath(mp); 1126 ret = gfs2_iomap_get(inode, iomap->offset, 1127 iomap->length, flags, iomap, mp); 1128 if (ret) 1129 goto out_trans_end; 1130 } 1131 1132 if (iomap->type == IOMAP_HOLE) { 1133 ret = gfs2_iomap_alloc(inode, iomap, mp); 1134 if (ret) { 1135 gfs2_trans_end(sdp); 1136 gfs2_inplace_release(ip); 1137 punch_hole(ip, iomap->offset, iomap->length); 1138 goto out_qunlock; 1139 } 1140 } 1141 1142 tr = current->journal_info; 1143 if (tr->tr_num_buf_new) 1144 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 1145 1146 gfs2_trans_end(sdp); 1147 } 1148 1149 if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip)) 1150 iomap->page_ops = &gfs2_iomap_page_ops; 1151 return 0; 1152 1153 out_trans_end: 1154 gfs2_trans_end(sdp); 1155 out_trans_fail: 1156 if (alloc_required) 1157 gfs2_inplace_release(ip); 1158 out_qunlock: 1159 if (alloc_required) 1160 gfs2_quota_unlock(ip); 1161 out_unlock: 1162 gfs2_write_unlock(inode); 1163 return ret; 1164 } 1165 1166 static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length, 1167 unsigned flags, struct iomap *iomap) 1168 { 1169 struct gfs2_inode *ip = GFS2_I(inode); 1170 struct metapath mp = { .mp_aheight = 1, }; 1171 int ret; 1172 1173 iomap->flags |= IOMAP_F_BUFFER_HEAD; 1174 1175 trace_gfs2_iomap_start(ip, pos, length, flags); 1176 if ((flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)) { 1177 ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp); 1178 } else { 1179 ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp); 1180 1181 /* 1182 * Silently fall back to buffered I/O for stuffed files or if 1183 * we've hot a hole (see gfs2_file_direct_write). 1184 */ 1185 if ((flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT) && 1186 iomap->type != IOMAP_MAPPED) 1187 ret = -ENOTBLK; 1188 } 1189 release_metapath(&mp); 1190 trace_gfs2_iomap_end(ip, iomap, ret); 1191 return ret; 1192 } 1193 1194 static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length, 1195 ssize_t written, unsigned flags, struct iomap *iomap) 1196 { 1197 struct gfs2_inode *ip = GFS2_I(inode); 1198 struct gfs2_sbd *sdp = GFS2_SB(inode); 1199 1200 if ((flags & (IOMAP_WRITE | IOMAP_DIRECT)) != IOMAP_WRITE) 1201 goto out; 1202 1203 if (!gfs2_is_stuffed(ip)) 1204 gfs2_ordered_add_inode(ip); 1205 1206 if (inode == sdp->sd_rindex) 1207 adjust_fs_space(inode); 1208 1209 gfs2_inplace_release(ip); 1210 1211 if (length != written && (iomap->flags & IOMAP_F_NEW)) { 1212 /* Deallocate blocks that were just allocated. */ 1213 loff_t blockmask = i_blocksize(inode) - 1; 1214 loff_t end = (pos + length) & ~blockmask; 1215 1216 pos = (pos + written + blockmask) & ~blockmask; 1217 if (pos < end) { 1218 truncate_pagecache_range(inode, pos, end - 1); 1219 punch_hole(ip, pos, end - pos); 1220 } 1221 } 1222 1223 if (ip->i_qadata && ip->i_qadata->qa_qd_num) 1224 gfs2_quota_unlock(ip); 1225 1226 if (unlikely(!written)) 1227 goto out_unlock; 1228 1229 if (iomap->flags & IOMAP_F_SIZE_CHANGED) 1230 mark_inode_dirty(inode); 1231 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); 1232 1233 out_unlock: 1234 gfs2_write_unlock(inode); 1235 out: 1236 return 0; 1237 } 1238 1239 const struct iomap_ops gfs2_iomap_ops = { 1240 .iomap_begin = gfs2_iomap_begin, 1241 .iomap_end = gfs2_iomap_end, 1242 }; 1243 1244 /** 1245 * gfs2_block_map - Map one or more blocks of an inode to a disk block 1246 * @inode: The inode 1247 * @lblock: The logical block number 1248 * @bh_map: The bh to be mapped 1249 * @create: True if its ok to alloc blocks to satify the request 1250 * 1251 * The size of the requested mapping is defined in bh_map->b_size. 1252 * 1253 * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged 1254 * when @lblock is not mapped. Sets buffer_mapped(bh_map) and 1255 * bh_map->b_size to indicate the size of the mapping when @lblock and 1256 * successive blocks are mapped, up to the requested size. 1257 * 1258 * Sets buffer_boundary() if a read of metadata will be required 1259 * before the next block can be mapped. Sets buffer_new() if new 1260 * blocks were allocated. 1261 * 1262 * Returns: errno 1263 */ 1264 1265 int gfs2_block_map(struct inode *inode, sector_t lblock, 1266 struct buffer_head *bh_map, int create) 1267 { 1268 struct gfs2_inode *ip = GFS2_I(inode); 1269 loff_t pos = (loff_t)lblock << inode->i_blkbits; 1270 loff_t length = bh_map->b_size; 1271 struct metapath mp = { .mp_aheight = 1, }; 1272 struct iomap iomap = { }; 1273 int ret; 1274 1275 clear_buffer_mapped(bh_map); 1276 clear_buffer_new(bh_map); 1277 clear_buffer_boundary(bh_map); 1278 trace_gfs2_bmap(ip, bh_map, lblock, create, 1); 1279 1280 if (create) { 1281 ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, &iomap, &mp); 1282 if (!ret && iomap.type == IOMAP_HOLE) 1283 ret = gfs2_iomap_alloc(inode, &iomap, &mp); 1284 release_metapath(&mp); 1285 } else { 1286 ret = gfs2_iomap_get(inode, pos, length, 0, &iomap, &mp); 1287 release_metapath(&mp); 1288 } 1289 if (ret) 1290 goto out; 1291 1292 if (iomap.length > bh_map->b_size) { 1293 iomap.length = bh_map->b_size; 1294 iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY; 1295 } 1296 if (iomap.addr != IOMAP_NULL_ADDR) 1297 map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits); 1298 bh_map->b_size = iomap.length; 1299 if (iomap.flags & IOMAP_F_GFS2_BOUNDARY) 1300 set_buffer_boundary(bh_map); 1301 if (iomap.flags & IOMAP_F_NEW) 1302 set_buffer_new(bh_map); 1303 1304 out: 1305 trace_gfs2_bmap(ip, bh_map, lblock, create, ret); 1306 return ret; 1307 } 1308 1309 /* 1310 * Deprecated: do not use in new code 1311 */ 1312 int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen) 1313 { 1314 struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 }; 1315 int ret; 1316 int create = *new; 1317 1318 BUG_ON(!extlen); 1319 BUG_ON(!dblock); 1320 BUG_ON(!new); 1321 1322 bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5)); 1323 ret = gfs2_block_map(inode, lblock, &bh, create); 1324 *extlen = bh.b_size >> inode->i_blkbits; 1325 *dblock = bh.b_blocknr; 1326 if (buffer_new(&bh)) 1327 *new = 1; 1328 else 1329 *new = 0; 1330 return ret; 1331 } 1332 1333 /** 1334 * gfs2_block_zero_range - Deal with zeroing out data 1335 * 1336 * This is partly borrowed from ext3. 1337 */ 1338 static int gfs2_block_zero_range(struct inode *inode, loff_t from, 1339 unsigned int length) 1340 { 1341 struct address_space *mapping = inode->i_mapping; 1342 struct gfs2_inode *ip = GFS2_I(inode); 1343 unsigned long index = from >> PAGE_SHIFT; 1344 unsigned offset = from & (PAGE_SIZE-1); 1345 unsigned blocksize, iblock, pos; 1346 struct buffer_head *bh; 1347 struct page *page; 1348 int err; 1349 1350 page = find_or_create_page(mapping, index, GFP_NOFS); 1351 if (!page) 1352 return 0; 1353 1354 blocksize = inode->i_sb->s_blocksize; 1355 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits); 1356 1357 if (!page_has_buffers(page)) 1358 create_empty_buffers(page, blocksize, 0); 1359 1360 /* Find the buffer that contains "offset" */ 1361 bh = page_buffers(page); 1362 pos = blocksize; 1363 while (offset >= pos) { 1364 bh = bh->b_this_page; 1365 iblock++; 1366 pos += blocksize; 1367 } 1368 1369 err = 0; 1370 1371 if (!buffer_mapped(bh)) { 1372 gfs2_block_map(inode, iblock, bh, 0); 1373 /* unmapped? It's a hole - nothing to do */ 1374 if (!buffer_mapped(bh)) 1375 goto unlock; 1376 } 1377 1378 /* Ok, it's mapped. Make sure it's up-to-date */ 1379 if (PageUptodate(page)) 1380 set_buffer_uptodate(bh); 1381 1382 if (!buffer_uptodate(bh)) { 1383 err = -EIO; 1384 ll_rw_block(REQ_OP_READ, 0, 1, &bh); 1385 wait_on_buffer(bh); 1386 /* Uhhuh. Read error. Complain and punt. */ 1387 if (!buffer_uptodate(bh)) 1388 goto unlock; 1389 err = 0; 1390 } 1391 1392 if (gfs2_is_jdata(ip)) 1393 gfs2_trans_add_data(ip->i_gl, bh); 1394 else 1395 gfs2_ordered_add_inode(ip); 1396 1397 zero_user(page, offset, length); 1398 mark_buffer_dirty(bh); 1399 unlock: 1400 unlock_page(page); 1401 put_page(page); 1402 return err; 1403 } 1404 1405 #define GFS2_JTRUNC_REVOKES 8192 1406 1407 /** 1408 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files 1409 * @inode: The inode being truncated 1410 * @oldsize: The original (larger) size 1411 * @newsize: The new smaller size 1412 * 1413 * With jdata files, we have to journal a revoke for each block which is 1414 * truncated. As a result, we need to split this into separate transactions 1415 * if the number of pages being truncated gets too large. 1416 */ 1417 1418 static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize) 1419 { 1420 struct gfs2_sbd *sdp = GFS2_SB(inode); 1421 u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize; 1422 u64 chunk; 1423 int error; 1424 1425 while (oldsize != newsize) { 1426 struct gfs2_trans *tr; 1427 unsigned int offs; 1428 1429 chunk = oldsize - newsize; 1430 if (chunk > max_chunk) 1431 chunk = max_chunk; 1432 1433 offs = oldsize & ~PAGE_MASK; 1434 if (offs && chunk > PAGE_SIZE) 1435 chunk = offs + ((chunk - offs) & PAGE_MASK); 1436 1437 truncate_pagecache(inode, oldsize - chunk); 1438 oldsize -= chunk; 1439 1440 tr = current->journal_info; 1441 if (!test_bit(TR_TOUCHED, &tr->tr_flags)) 1442 continue; 1443 1444 gfs2_trans_end(sdp); 1445 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES); 1446 if (error) 1447 return error; 1448 } 1449 1450 return 0; 1451 } 1452 1453 static int trunc_start(struct inode *inode, u64 newsize) 1454 { 1455 struct gfs2_inode *ip = GFS2_I(inode); 1456 struct gfs2_sbd *sdp = GFS2_SB(inode); 1457 struct buffer_head *dibh = NULL; 1458 int journaled = gfs2_is_jdata(ip); 1459 u64 oldsize = inode->i_size; 1460 int error; 1461 1462 if (journaled) 1463 error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES); 1464 else 1465 error = gfs2_trans_begin(sdp, RES_DINODE, 0); 1466 if (error) 1467 return error; 1468 1469 error = gfs2_meta_inode_buffer(ip, &dibh); 1470 if (error) 1471 goto out; 1472 1473 gfs2_trans_add_meta(ip->i_gl, dibh); 1474 1475 if (gfs2_is_stuffed(ip)) { 1476 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize); 1477 } else { 1478 unsigned int blocksize = i_blocksize(inode); 1479 unsigned int offs = newsize & (blocksize - 1); 1480 if (offs) { 1481 error = gfs2_block_zero_range(inode, newsize, 1482 blocksize - offs); 1483 if (error) 1484 goto out; 1485 } 1486 ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG; 1487 } 1488 1489 i_size_write(inode, newsize); 1490 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode); 1491 gfs2_dinode_out(ip, dibh->b_data); 1492 1493 if (journaled) 1494 error = gfs2_journaled_truncate(inode, oldsize, newsize); 1495 else 1496 truncate_pagecache(inode, newsize); 1497 1498 out: 1499 brelse(dibh); 1500 if (current->journal_info) 1501 gfs2_trans_end(sdp); 1502 return error; 1503 } 1504 1505 int gfs2_iomap_get_alloc(struct inode *inode, loff_t pos, loff_t length, 1506 struct iomap *iomap) 1507 { 1508 struct metapath mp = { .mp_aheight = 1, }; 1509 int ret; 1510 1511 ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp); 1512 if (!ret && iomap->type == IOMAP_HOLE) 1513 ret = gfs2_iomap_alloc(inode, iomap, &mp); 1514 release_metapath(&mp); 1515 return ret; 1516 } 1517 1518 /** 1519 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein 1520 * @ip: inode 1521 * @rg_gh: holder of resource group glock 1522 * @bh: buffer head to sweep 1523 * @start: starting point in bh 1524 * @end: end point in bh 1525 * @meta: true if bh points to metadata (rather than data) 1526 * @btotal: place to keep count of total blocks freed 1527 * 1528 * We sweep a metadata buffer (provided by the metapath) for blocks we need to 1529 * free, and free them all. However, we do it one rgrp at a time. If this 1530 * block has references to multiple rgrps, we break it into individual 1531 * transactions. This allows other processes to use the rgrps while we're 1532 * focused on a single one, for better concurrency / performance. 1533 * At every transaction boundary, we rewrite the inode into the journal. 1534 * That way the bitmaps are kept consistent with the inode and we can recover 1535 * if we're interrupted by power-outages. 1536 * 1537 * Returns: 0, or return code if an error occurred. 1538 * *btotal has the total number of blocks freed 1539 */ 1540 static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh, 1541 struct buffer_head *bh, __be64 *start, __be64 *end, 1542 bool meta, u32 *btotal) 1543 { 1544 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1545 struct gfs2_rgrpd *rgd; 1546 struct gfs2_trans *tr; 1547 __be64 *p; 1548 int blks_outside_rgrp; 1549 u64 bn, bstart, isize_blks; 1550 s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */ 1551 int ret = 0; 1552 bool buf_in_tr = false; /* buffer was added to transaction */ 1553 1554 more_rgrps: 1555 rgd = NULL; 1556 if (gfs2_holder_initialized(rd_gh)) { 1557 rgd = gfs2_glock2rgrp(rd_gh->gh_gl); 1558 gfs2_assert_withdraw(sdp, 1559 gfs2_glock_is_locked_by_me(rd_gh->gh_gl)); 1560 } 1561 blks_outside_rgrp = 0; 1562 bstart = 0; 1563 blen = 0; 1564 1565 for (p = start; p < end; p++) { 1566 if (!*p) 1567 continue; 1568 bn = be64_to_cpu(*p); 1569 1570 if (rgd) { 1571 if (!rgrp_contains_block(rgd, bn)) { 1572 blks_outside_rgrp++; 1573 continue; 1574 } 1575 } else { 1576 rgd = gfs2_blk2rgrpd(sdp, bn, true); 1577 if (unlikely(!rgd)) { 1578 ret = -EIO; 1579 goto out; 1580 } 1581 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 1582 0, rd_gh); 1583 if (ret) 1584 goto out; 1585 1586 /* Must be done with the rgrp glock held: */ 1587 if (gfs2_rs_active(&ip->i_res) && 1588 rgd == ip->i_res.rs_rbm.rgd) 1589 gfs2_rs_deltree(&ip->i_res); 1590 } 1591 1592 /* The size of our transactions will be unknown until we 1593 actually process all the metadata blocks that relate to 1594 the rgrp. So we estimate. We know it can't be more than 1595 the dinode's i_blocks and we don't want to exceed the 1596 journal flush threshold, sd_log_thresh2. */ 1597 if (current->journal_info == NULL) { 1598 unsigned int jblocks_rqsted, revokes; 1599 1600 jblocks_rqsted = rgd->rd_length + RES_DINODE + 1601 RES_INDIRECT; 1602 isize_blks = gfs2_get_inode_blocks(&ip->i_inode); 1603 if (isize_blks > atomic_read(&sdp->sd_log_thresh2)) 1604 jblocks_rqsted += 1605 atomic_read(&sdp->sd_log_thresh2); 1606 else 1607 jblocks_rqsted += isize_blks; 1608 revokes = jblocks_rqsted; 1609 if (meta) 1610 revokes += end - start; 1611 else if (ip->i_depth) 1612 revokes += sdp->sd_inptrs; 1613 ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes); 1614 if (ret) 1615 goto out_unlock; 1616 down_write(&ip->i_rw_mutex); 1617 } 1618 /* check if we will exceed the transaction blocks requested */ 1619 tr = current->journal_info; 1620 if (tr->tr_num_buf_new + RES_STATFS + 1621 RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) { 1622 /* We set blks_outside_rgrp to ensure the loop will 1623 be repeated for the same rgrp, but with a new 1624 transaction. */ 1625 blks_outside_rgrp++; 1626 /* This next part is tricky. If the buffer was added 1627 to the transaction, we've already set some block 1628 pointers to 0, so we better follow through and free 1629 them, or we will introduce corruption (so break). 1630 This may be impossible, or at least rare, but I 1631 decided to cover the case regardless. 1632 1633 If the buffer was not added to the transaction 1634 (this call), doing so would exceed our transaction 1635 size, so we need to end the transaction and start a 1636 new one (so goto). */ 1637 1638 if (buf_in_tr) 1639 break; 1640 goto out_unlock; 1641 } 1642 1643 gfs2_trans_add_meta(ip->i_gl, bh); 1644 buf_in_tr = true; 1645 *p = 0; 1646 if (bstart + blen == bn) { 1647 blen++; 1648 continue; 1649 } 1650 if (bstart) { 1651 __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta); 1652 (*btotal) += blen; 1653 gfs2_add_inode_blocks(&ip->i_inode, -blen); 1654 } 1655 bstart = bn; 1656 blen = 1; 1657 } 1658 if (bstart) { 1659 __gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta); 1660 (*btotal) += blen; 1661 gfs2_add_inode_blocks(&ip->i_inode, -blen); 1662 } 1663 out_unlock: 1664 if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks 1665 outside the rgrp we just processed, 1666 do it all over again. */ 1667 if (current->journal_info) { 1668 struct buffer_head *dibh; 1669 1670 ret = gfs2_meta_inode_buffer(ip, &dibh); 1671 if (ret) 1672 goto out; 1673 1674 /* Every transaction boundary, we rewrite the dinode 1675 to keep its di_blocks current in case of failure. */ 1676 ip->i_inode.i_mtime = ip->i_inode.i_ctime = 1677 current_time(&ip->i_inode); 1678 gfs2_trans_add_meta(ip->i_gl, dibh); 1679 gfs2_dinode_out(ip, dibh->b_data); 1680 brelse(dibh); 1681 up_write(&ip->i_rw_mutex); 1682 gfs2_trans_end(sdp); 1683 } 1684 gfs2_glock_dq_uninit(rd_gh); 1685 cond_resched(); 1686 goto more_rgrps; 1687 } 1688 out: 1689 return ret; 1690 } 1691 1692 static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h) 1693 { 1694 if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0]))) 1695 return false; 1696 return true; 1697 } 1698 1699 /** 1700 * find_nonnull_ptr - find a non-null pointer given a metapath and height 1701 * @mp: starting metapath 1702 * @h: desired height to search 1703 * 1704 * Assumes the metapath is valid (with buffers) out to height h. 1705 * Returns: true if a non-null pointer was found in the metapath buffer 1706 * false if all remaining pointers are NULL in the buffer 1707 */ 1708 static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp, 1709 unsigned int h, 1710 __u16 *end_list, unsigned int end_aligned) 1711 { 1712 struct buffer_head *bh = mp->mp_bh[h]; 1713 __be64 *first, *ptr, *end; 1714 1715 first = metaptr1(h, mp); 1716 ptr = first + mp->mp_list[h]; 1717 end = (__be64 *)(bh->b_data + bh->b_size); 1718 if (end_list && mp_eq_to_hgt(mp, end_list, h)) { 1719 bool keep_end = h < end_aligned; 1720 end = first + end_list[h] + keep_end; 1721 } 1722 1723 while (ptr < end) { 1724 if (*ptr) { /* if we have a non-null pointer */ 1725 mp->mp_list[h] = ptr - first; 1726 h++; 1727 if (h < GFS2_MAX_META_HEIGHT) 1728 mp->mp_list[h] = 0; 1729 return true; 1730 } 1731 ptr++; 1732 } 1733 return false; 1734 } 1735 1736 enum dealloc_states { 1737 DEALLOC_MP_FULL = 0, /* Strip a metapath with all buffers read in */ 1738 DEALLOC_MP_LOWER = 1, /* lower the metapath strip height */ 1739 DEALLOC_FILL_MP = 2, /* Fill in the metapath to the given height. */ 1740 DEALLOC_DONE = 3, /* process complete */ 1741 }; 1742 1743 static inline void 1744 metapointer_range(struct metapath *mp, int height, 1745 __u16 *start_list, unsigned int start_aligned, 1746 __u16 *end_list, unsigned int end_aligned, 1747 __be64 **start, __be64 **end) 1748 { 1749 struct buffer_head *bh = mp->mp_bh[height]; 1750 __be64 *first; 1751 1752 first = metaptr1(height, mp); 1753 *start = first; 1754 if (mp_eq_to_hgt(mp, start_list, height)) { 1755 bool keep_start = height < start_aligned; 1756 *start = first + start_list[height] + keep_start; 1757 } 1758 *end = (__be64 *)(bh->b_data + bh->b_size); 1759 if (end_list && mp_eq_to_hgt(mp, end_list, height)) { 1760 bool keep_end = height < end_aligned; 1761 *end = first + end_list[height] + keep_end; 1762 } 1763 } 1764 1765 static inline bool walk_done(struct gfs2_sbd *sdp, 1766 struct metapath *mp, int height, 1767 __u16 *end_list, unsigned int end_aligned) 1768 { 1769 __u16 end; 1770 1771 if (end_list) { 1772 bool keep_end = height < end_aligned; 1773 if (!mp_eq_to_hgt(mp, end_list, height)) 1774 return false; 1775 end = end_list[height] + keep_end; 1776 } else 1777 end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs; 1778 return mp->mp_list[height] >= end; 1779 } 1780 1781 /** 1782 * punch_hole - deallocate blocks in a file 1783 * @ip: inode to truncate 1784 * @offset: the start of the hole 1785 * @length: the size of the hole (or 0 for truncate) 1786 * 1787 * Punch a hole into a file or truncate a file at a given position. This 1788 * function operates in whole blocks (@offset and @length are rounded 1789 * accordingly); partially filled blocks must be cleared otherwise. 1790 * 1791 * This function works from the bottom up, and from the right to the left. In 1792 * other words, it strips off the highest layer (data) before stripping any of 1793 * the metadata. Doing it this way is best in case the operation is interrupted 1794 * by power failure, etc. The dinode is rewritten in every transaction to 1795 * guarantee integrity. 1796 */ 1797 static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length) 1798 { 1799 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1800 u64 maxsize = sdp->sd_heightsize[ip->i_height]; 1801 struct metapath mp = {}; 1802 struct buffer_head *dibh, *bh; 1803 struct gfs2_holder rd_gh; 1804 unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift; 1805 u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift; 1806 __u16 start_list[GFS2_MAX_META_HEIGHT]; 1807 __u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL; 1808 unsigned int start_aligned, uninitialized_var(end_aligned); 1809 unsigned int strip_h = ip->i_height - 1; 1810 u32 btotal = 0; 1811 int ret, state; 1812 int mp_h; /* metapath buffers are read in to this height */ 1813 u64 prev_bnr = 0; 1814 __be64 *start, *end; 1815 1816 if (offset >= maxsize) { 1817 /* 1818 * The starting point lies beyond the allocated meta-data; 1819 * there are no blocks do deallocate. 1820 */ 1821 return 0; 1822 } 1823 1824 /* 1825 * The start position of the hole is defined by lblock, start_list, and 1826 * start_aligned. The end position of the hole is defined by lend, 1827 * end_list, and end_aligned. 1828 * 1829 * start_aligned and end_aligned define down to which height the start 1830 * and end positions are aligned to the metadata tree (i.e., the 1831 * position is a multiple of the metadata granularity at the height 1832 * above). This determines at which heights additional meta pointers 1833 * needs to be preserved for the remaining data. 1834 */ 1835 1836 if (length) { 1837 u64 end_offset = offset + length; 1838 u64 lend; 1839 1840 /* 1841 * Clip the end at the maximum file size for the given height: 1842 * that's how far the metadata goes; files bigger than that 1843 * will have additional layers of indirection. 1844 */ 1845 if (end_offset > maxsize) 1846 end_offset = maxsize; 1847 lend = end_offset >> bsize_shift; 1848 1849 if (lblock >= lend) 1850 return 0; 1851 1852 find_metapath(sdp, lend, &mp, ip->i_height); 1853 end_list = __end_list; 1854 memcpy(end_list, mp.mp_list, sizeof(mp.mp_list)); 1855 1856 for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) { 1857 if (end_list[mp_h]) 1858 break; 1859 } 1860 end_aligned = mp_h; 1861 } 1862 1863 find_metapath(sdp, lblock, &mp, ip->i_height); 1864 memcpy(start_list, mp.mp_list, sizeof(start_list)); 1865 1866 for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) { 1867 if (start_list[mp_h]) 1868 break; 1869 } 1870 start_aligned = mp_h; 1871 1872 ret = gfs2_meta_inode_buffer(ip, &dibh); 1873 if (ret) 1874 return ret; 1875 1876 mp.mp_bh[0] = dibh; 1877 ret = lookup_metapath(ip, &mp); 1878 if (ret) 1879 goto out_metapath; 1880 1881 /* issue read-ahead on metadata */ 1882 for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) { 1883 metapointer_range(&mp, mp_h, start_list, start_aligned, 1884 end_list, end_aligned, &start, &end); 1885 gfs2_metapath_ra(ip->i_gl, start, end); 1886 } 1887 1888 if (mp.mp_aheight == ip->i_height) 1889 state = DEALLOC_MP_FULL; /* We have a complete metapath */ 1890 else 1891 state = DEALLOC_FILL_MP; /* deal with partial metapath */ 1892 1893 ret = gfs2_rindex_update(sdp); 1894 if (ret) 1895 goto out_metapath; 1896 1897 ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); 1898 if (ret) 1899 goto out_metapath; 1900 gfs2_holder_mark_uninitialized(&rd_gh); 1901 1902 mp_h = strip_h; 1903 1904 while (state != DEALLOC_DONE) { 1905 switch (state) { 1906 /* Truncate a full metapath at the given strip height. 1907 * Note that strip_h == mp_h in order to be in this state. */ 1908 case DEALLOC_MP_FULL: 1909 bh = mp.mp_bh[mp_h]; 1910 gfs2_assert_withdraw(sdp, bh); 1911 if (gfs2_assert_withdraw(sdp, 1912 prev_bnr != bh->b_blocknr)) { 1913 fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u," 1914 "s_h:%u, mp_h:%u\n", 1915 (unsigned long long)ip->i_no_addr, 1916 prev_bnr, ip->i_height, strip_h, mp_h); 1917 } 1918 prev_bnr = bh->b_blocknr; 1919 1920 if (gfs2_metatype_check(sdp, bh, 1921 (mp_h ? GFS2_METATYPE_IN : 1922 GFS2_METATYPE_DI))) { 1923 ret = -EIO; 1924 goto out; 1925 } 1926 1927 /* 1928 * Below, passing end_aligned as 0 gives us the 1929 * metapointer range excluding the end point: the end 1930 * point is the first metapath we must not deallocate! 1931 */ 1932 1933 metapointer_range(&mp, mp_h, start_list, start_aligned, 1934 end_list, 0 /* end_aligned */, 1935 &start, &end); 1936 ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h], 1937 start, end, 1938 mp_h != ip->i_height - 1, 1939 &btotal); 1940 1941 /* If we hit an error or just swept dinode buffer, 1942 just exit. */ 1943 if (ret || !mp_h) { 1944 state = DEALLOC_DONE; 1945 break; 1946 } 1947 state = DEALLOC_MP_LOWER; 1948 break; 1949 1950 /* lower the metapath strip height */ 1951 case DEALLOC_MP_LOWER: 1952 /* We're done with the current buffer, so release it, 1953 unless it's the dinode buffer. Then back up to the 1954 previous pointer. */ 1955 if (mp_h) { 1956 brelse(mp.mp_bh[mp_h]); 1957 mp.mp_bh[mp_h] = NULL; 1958 } 1959 /* If we can't get any lower in height, we've stripped 1960 off all we can. Next step is to back up and start 1961 stripping the previous level of metadata. */ 1962 if (mp_h == 0) { 1963 strip_h--; 1964 memcpy(mp.mp_list, start_list, sizeof(start_list)); 1965 mp_h = strip_h; 1966 state = DEALLOC_FILL_MP; 1967 break; 1968 } 1969 mp.mp_list[mp_h] = 0; 1970 mp_h--; /* search one metadata height down */ 1971 mp.mp_list[mp_h]++; 1972 if (walk_done(sdp, &mp, mp_h, end_list, end_aligned)) 1973 break; 1974 /* Here we've found a part of the metapath that is not 1975 * allocated. We need to search at that height for the 1976 * next non-null pointer. */ 1977 if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) { 1978 state = DEALLOC_FILL_MP; 1979 mp_h++; 1980 } 1981 /* No more non-null pointers at this height. Back up 1982 to the previous height and try again. */ 1983 break; /* loop around in the same state */ 1984 1985 /* Fill the metapath with buffers to the given height. */ 1986 case DEALLOC_FILL_MP: 1987 /* Fill the buffers out to the current height. */ 1988 ret = fillup_metapath(ip, &mp, mp_h); 1989 if (ret < 0) 1990 goto out; 1991 1992 /* On the first pass, issue read-ahead on metadata. */ 1993 if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) { 1994 unsigned int height = mp.mp_aheight - 1; 1995 1996 /* No read-ahead for data blocks. */ 1997 if (mp.mp_aheight - 1 == strip_h) 1998 height--; 1999 2000 for (; height >= mp.mp_aheight - ret; height--) { 2001 metapointer_range(&mp, height, 2002 start_list, start_aligned, 2003 end_list, end_aligned, 2004 &start, &end); 2005 gfs2_metapath_ra(ip->i_gl, start, end); 2006 } 2007 } 2008 2009 /* If buffers found for the entire strip height */ 2010 if (mp.mp_aheight - 1 == strip_h) { 2011 state = DEALLOC_MP_FULL; 2012 break; 2013 } 2014 if (mp.mp_aheight < ip->i_height) /* We have a partial height */ 2015 mp_h = mp.mp_aheight - 1; 2016 2017 /* If we find a non-null block pointer, crawl a bit 2018 higher up in the metapath and try again, otherwise 2019 we need to look lower for a new starting point. */ 2020 if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) 2021 mp_h++; 2022 else 2023 state = DEALLOC_MP_LOWER; 2024 break; 2025 } 2026 } 2027 2028 if (btotal) { 2029 if (current->journal_info == NULL) { 2030 ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + 2031 RES_QUOTA, 0); 2032 if (ret) 2033 goto out; 2034 down_write(&ip->i_rw_mutex); 2035 } 2036 gfs2_statfs_change(sdp, 0, +btotal, 0); 2037 gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid, 2038 ip->i_inode.i_gid); 2039 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode); 2040 gfs2_trans_add_meta(ip->i_gl, dibh); 2041 gfs2_dinode_out(ip, dibh->b_data); 2042 up_write(&ip->i_rw_mutex); 2043 gfs2_trans_end(sdp); 2044 } 2045 2046 out: 2047 if (gfs2_holder_initialized(&rd_gh)) 2048 gfs2_glock_dq_uninit(&rd_gh); 2049 if (current->journal_info) { 2050 up_write(&ip->i_rw_mutex); 2051 gfs2_trans_end(sdp); 2052 cond_resched(); 2053 } 2054 gfs2_quota_unhold(ip); 2055 out_metapath: 2056 release_metapath(&mp); 2057 return ret; 2058 } 2059 2060 static int trunc_end(struct gfs2_inode *ip) 2061 { 2062 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 2063 struct buffer_head *dibh; 2064 int error; 2065 2066 error = gfs2_trans_begin(sdp, RES_DINODE, 0); 2067 if (error) 2068 return error; 2069 2070 down_write(&ip->i_rw_mutex); 2071 2072 error = gfs2_meta_inode_buffer(ip, &dibh); 2073 if (error) 2074 goto out; 2075 2076 if (!i_size_read(&ip->i_inode)) { 2077 ip->i_height = 0; 2078 ip->i_goal = ip->i_no_addr; 2079 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); 2080 gfs2_ordered_del_inode(ip); 2081 } 2082 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode); 2083 ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG; 2084 2085 gfs2_trans_add_meta(ip->i_gl, dibh); 2086 gfs2_dinode_out(ip, dibh->b_data); 2087 brelse(dibh); 2088 2089 out: 2090 up_write(&ip->i_rw_mutex); 2091 gfs2_trans_end(sdp); 2092 return error; 2093 } 2094 2095 /** 2096 * do_shrink - make a file smaller 2097 * @inode: the inode 2098 * @newsize: the size to make the file 2099 * 2100 * Called with an exclusive lock on @inode. The @size must 2101 * be equal to or smaller than the current inode size. 2102 * 2103 * Returns: errno 2104 */ 2105 2106 static int do_shrink(struct inode *inode, u64 newsize) 2107 { 2108 struct gfs2_inode *ip = GFS2_I(inode); 2109 int error; 2110 2111 error = trunc_start(inode, newsize); 2112 if (error < 0) 2113 return error; 2114 if (gfs2_is_stuffed(ip)) 2115 return 0; 2116 2117 error = punch_hole(ip, newsize, 0); 2118 if (error == 0) 2119 error = trunc_end(ip); 2120 2121 return error; 2122 } 2123 2124 void gfs2_trim_blocks(struct inode *inode) 2125 { 2126 int ret; 2127 2128 ret = do_shrink(inode, inode->i_size); 2129 WARN_ON(ret != 0); 2130 } 2131 2132 /** 2133 * do_grow - Touch and update inode size 2134 * @inode: The inode 2135 * @size: The new size 2136 * 2137 * This function updates the timestamps on the inode and 2138 * may also increase the size of the inode. This function 2139 * must not be called with @size any smaller than the current 2140 * inode size. 2141 * 2142 * Although it is not strictly required to unstuff files here, 2143 * earlier versions of GFS2 have a bug in the stuffed file reading 2144 * code which will result in a buffer overrun if the size is larger 2145 * than the max stuffed file size. In order to prevent this from 2146 * occurring, such files are unstuffed, but in other cases we can 2147 * just update the inode size directly. 2148 * 2149 * Returns: 0 on success, or -ve on error 2150 */ 2151 2152 static int do_grow(struct inode *inode, u64 size) 2153 { 2154 struct gfs2_inode *ip = GFS2_I(inode); 2155 struct gfs2_sbd *sdp = GFS2_SB(inode); 2156 struct gfs2_alloc_parms ap = { .target = 1, }; 2157 struct buffer_head *dibh; 2158 int error; 2159 int unstuff = 0; 2160 2161 if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) { 2162 error = gfs2_quota_lock_check(ip, &ap); 2163 if (error) 2164 return error; 2165 2166 error = gfs2_inplace_reserve(ip, &ap); 2167 if (error) 2168 goto do_grow_qunlock; 2169 unstuff = 1; 2170 } 2171 2172 error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT + 2173 (unstuff && 2174 gfs2_is_jdata(ip) ? RES_JDATA : 0) + 2175 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ? 2176 0 : RES_QUOTA), 0); 2177 if (error) 2178 goto do_grow_release; 2179 2180 if (unstuff) { 2181 error = gfs2_unstuff_dinode(ip, NULL); 2182 if (error) 2183 goto do_end_trans; 2184 } 2185 2186 error = gfs2_meta_inode_buffer(ip, &dibh); 2187 if (error) 2188 goto do_end_trans; 2189 2190 i_size_write(inode, size); 2191 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode); 2192 gfs2_trans_add_meta(ip->i_gl, dibh); 2193 gfs2_dinode_out(ip, dibh->b_data); 2194 brelse(dibh); 2195 2196 do_end_trans: 2197 gfs2_trans_end(sdp); 2198 do_grow_release: 2199 if (unstuff) { 2200 gfs2_inplace_release(ip); 2201 do_grow_qunlock: 2202 gfs2_quota_unlock(ip); 2203 } 2204 return error; 2205 } 2206 2207 /** 2208 * gfs2_setattr_size - make a file a given size 2209 * @inode: the inode 2210 * @newsize: the size to make the file 2211 * 2212 * The file size can grow, shrink, or stay the same size. This 2213 * is called holding i_rwsem and an exclusive glock on the inode 2214 * in question. 2215 * 2216 * Returns: errno 2217 */ 2218 2219 int gfs2_setattr_size(struct inode *inode, u64 newsize) 2220 { 2221 struct gfs2_inode *ip = GFS2_I(inode); 2222 int ret; 2223 2224 BUG_ON(!S_ISREG(inode->i_mode)); 2225 2226 ret = inode_newsize_ok(inode, newsize); 2227 if (ret) 2228 return ret; 2229 2230 inode_dio_wait(inode); 2231 2232 ret = gfs2_rsqa_alloc(ip); 2233 if (ret) 2234 goto out; 2235 2236 if (newsize >= inode->i_size) { 2237 ret = do_grow(inode, newsize); 2238 goto out; 2239 } 2240 2241 ret = do_shrink(inode, newsize); 2242 out: 2243 gfs2_rsqa_delete(ip, NULL); 2244 return ret; 2245 } 2246 2247 int gfs2_truncatei_resume(struct gfs2_inode *ip) 2248 { 2249 int error; 2250 error = punch_hole(ip, i_size_read(&ip->i_inode), 0); 2251 if (!error) 2252 error = trunc_end(ip); 2253 return error; 2254 } 2255 2256 int gfs2_file_dealloc(struct gfs2_inode *ip) 2257 { 2258 return punch_hole(ip, 0, 0); 2259 } 2260 2261 /** 2262 * gfs2_free_journal_extents - Free cached journal bmap info 2263 * @jd: The journal 2264 * 2265 */ 2266 2267 void gfs2_free_journal_extents(struct gfs2_jdesc *jd) 2268 { 2269 struct gfs2_journal_extent *jext; 2270 2271 while(!list_empty(&jd->extent_list)) { 2272 jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list); 2273 list_del(&jext->list); 2274 kfree(jext); 2275 } 2276 } 2277 2278 /** 2279 * gfs2_add_jextent - Add or merge a new extent to extent cache 2280 * @jd: The journal descriptor 2281 * @lblock: The logical block at start of new extent 2282 * @dblock: The physical block at start of new extent 2283 * @blocks: Size of extent in fs blocks 2284 * 2285 * Returns: 0 on success or -ENOMEM 2286 */ 2287 2288 static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks) 2289 { 2290 struct gfs2_journal_extent *jext; 2291 2292 if (!list_empty(&jd->extent_list)) { 2293 jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list); 2294 if ((jext->dblock + jext->blocks) == dblock) { 2295 jext->blocks += blocks; 2296 return 0; 2297 } 2298 } 2299 2300 jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS); 2301 if (jext == NULL) 2302 return -ENOMEM; 2303 jext->dblock = dblock; 2304 jext->lblock = lblock; 2305 jext->blocks = blocks; 2306 list_add_tail(&jext->list, &jd->extent_list); 2307 jd->nr_extents++; 2308 return 0; 2309 } 2310 2311 /** 2312 * gfs2_map_journal_extents - Cache journal bmap info 2313 * @sdp: The super block 2314 * @jd: The journal to map 2315 * 2316 * Create a reusable "extent" mapping from all logical 2317 * blocks to all physical blocks for the given journal. This will save 2318 * us time when writing journal blocks. Most journals will have only one 2319 * extent that maps all their logical blocks. That's because gfs2.mkfs 2320 * arranges the journal blocks sequentially to maximize performance. 2321 * So the extent would map the first block for the entire file length. 2322 * However, gfs2_jadd can happen while file activity is happening, so 2323 * those journals may not be sequential. Less likely is the case where 2324 * the users created their own journals by mounting the metafs and 2325 * laying it out. But it's still possible. These journals might have 2326 * several extents. 2327 * 2328 * Returns: 0 on success, or error on failure 2329 */ 2330 2331 int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd) 2332 { 2333 u64 lblock = 0; 2334 u64 lblock_stop; 2335 struct gfs2_inode *ip = GFS2_I(jd->jd_inode); 2336 struct buffer_head bh; 2337 unsigned int shift = sdp->sd_sb.sb_bsize_shift; 2338 u64 size; 2339 int rc; 2340 ktime_t start, end; 2341 2342 start = ktime_get(); 2343 lblock_stop = i_size_read(jd->jd_inode) >> shift; 2344 size = (lblock_stop - lblock) << shift; 2345 jd->nr_extents = 0; 2346 WARN_ON(!list_empty(&jd->extent_list)); 2347 2348 do { 2349 bh.b_state = 0; 2350 bh.b_blocknr = 0; 2351 bh.b_size = size; 2352 rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0); 2353 if (rc || !buffer_mapped(&bh)) 2354 goto fail; 2355 rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift); 2356 if (rc) 2357 goto fail; 2358 size -= bh.b_size; 2359 lblock += (bh.b_size >> ip->i_inode.i_blkbits); 2360 } while(size > 0); 2361 2362 end = ktime_get(); 2363 fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid, 2364 jd->nr_extents, ktime_ms_delta(end, start)); 2365 return 0; 2366 2367 fail: 2368 fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n", 2369 rc, jd->jd_jid, 2370 (unsigned long long)(i_size_read(jd->jd_inode) - size), 2371 jd->nr_extents); 2372 fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n", 2373 rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr, 2374 bh.b_state, (unsigned long long)bh.b_size); 2375 gfs2_free_journal_extents(jd); 2376 return rc; 2377 } 2378 2379 /** 2380 * gfs2_write_alloc_required - figure out if a write will require an allocation 2381 * @ip: the file being written to 2382 * @offset: the offset to write to 2383 * @len: the number of bytes being written 2384 * 2385 * Returns: 1 if an alloc is required, 0 otherwise 2386 */ 2387 2388 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset, 2389 unsigned int len) 2390 { 2391 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 2392 struct buffer_head bh; 2393 unsigned int shift; 2394 u64 lblock, lblock_stop, size; 2395 u64 end_of_file; 2396 2397 if (!len) 2398 return 0; 2399 2400 if (gfs2_is_stuffed(ip)) { 2401 if (offset + len > gfs2_max_stuffed_size(ip)) 2402 return 1; 2403 return 0; 2404 } 2405 2406 shift = sdp->sd_sb.sb_bsize_shift; 2407 BUG_ON(gfs2_is_dir(ip)); 2408 end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift; 2409 lblock = offset >> shift; 2410 lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift; 2411 if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex)) 2412 return 1; 2413 2414 size = (lblock_stop - lblock) << shift; 2415 do { 2416 bh.b_state = 0; 2417 bh.b_size = size; 2418 gfs2_block_map(&ip->i_inode, lblock, &bh, 0); 2419 if (!buffer_mapped(&bh)) 2420 return 1; 2421 size -= bh.b_size; 2422 lblock += (bh.b_size >> ip->i_inode.i_blkbits); 2423 } while(size > 0); 2424 2425 return 0; 2426 } 2427 2428 static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length) 2429 { 2430 struct gfs2_inode *ip = GFS2_I(inode); 2431 struct buffer_head *dibh; 2432 int error; 2433 2434 if (offset >= inode->i_size) 2435 return 0; 2436 if (offset + length > inode->i_size) 2437 length = inode->i_size - offset; 2438 2439 error = gfs2_meta_inode_buffer(ip, &dibh); 2440 if (error) 2441 return error; 2442 gfs2_trans_add_meta(ip->i_gl, dibh); 2443 memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0, 2444 length); 2445 brelse(dibh); 2446 return 0; 2447 } 2448 2449 static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset, 2450 loff_t length) 2451 { 2452 struct gfs2_sbd *sdp = GFS2_SB(inode); 2453 loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize; 2454 int error; 2455 2456 while (length) { 2457 struct gfs2_trans *tr; 2458 loff_t chunk; 2459 unsigned int offs; 2460 2461 chunk = length; 2462 if (chunk > max_chunk) 2463 chunk = max_chunk; 2464 2465 offs = offset & ~PAGE_MASK; 2466 if (offs && chunk > PAGE_SIZE) 2467 chunk = offs + ((chunk - offs) & PAGE_MASK); 2468 2469 truncate_pagecache_range(inode, offset, chunk); 2470 offset += chunk; 2471 length -= chunk; 2472 2473 tr = current->journal_info; 2474 if (!test_bit(TR_TOUCHED, &tr->tr_flags)) 2475 continue; 2476 2477 gfs2_trans_end(sdp); 2478 error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES); 2479 if (error) 2480 return error; 2481 } 2482 return 0; 2483 } 2484 2485 int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length) 2486 { 2487 struct inode *inode = file_inode(file); 2488 struct gfs2_inode *ip = GFS2_I(inode); 2489 struct gfs2_sbd *sdp = GFS2_SB(inode); 2490 int error; 2491 2492 if (gfs2_is_jdata(ip)) 2493 error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA, 2494 GFS2_JTRUNC_REVOKES); 2495 else 2496 error = gfs2_trans_begin(sdp, RES_DINODE, 0); 2497 if (error) 2498 return error; 2499 2500 if (gfs2_is_stuffed(ip)) { 2501 error = stuffed_zero_range(inode, offset, length); 2502 if (error) 2503 goto out; 2504 } else { 2505 unsigned int start_off, end_len, blocksize; 2506 2507 blocksize = i_blocksize(inode); 2508 start_off = offset & (blocksize - 1); 2509 end_len = (offset + length) & (blocksize - 1); 2510 if (start_off) { 2511 unsigned int len = length; 2512 if (length > blocksize - start_off) 2513 len = blocksize - start_off; 2514 error = gfs2_block_zero_range(inode, offset, len); 2515 if (error) 2516 goto out; 2517 if (start_off + length < blocksize) 2518 end_len = 0; 2519 } 2520 if (end_len) { 2521 error = gfs2_block_zero_range(inode, 2522 offset + length - end_len, end_len); 2523 if (error) 2524 goto out; 2525 } 2526 } 2527 2528 if (gfs2_is_jdata(ip)) { 2529 BUG_ON(!current->journal_info); 2530 gfs2_journaled_truncate_range(inode, offset, length); 2531 } else 2532 truncate_pagecache_range(inode, offset, offset + length - 1); 2533 2534 file_update_time(file); 2535 mark_inode_dirty(inode); 2536 2537 if (current->journal_info) 2538 gfs2_trans_end(sdp); 2539 2540 if (!gfs2_is_stuffed(ip)) 2541 error = punch_hole(ip, offset, length); 2542 2543 out: 2544 if (current->journal_info) 2545 gfs2_trans_end(sdp); 2546 return error; 2547 } 2548